Diffstat (limited to 'drivers/net/i40e')
-rw-r--r--  drivers/net/i40e/Makefile                  |    7
-rw-r--r--  drivers/net/i40e/base/i40e_osdep.h         |    3
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c             | 1128
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h             |  231
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c          |  454
-rw-r--r--  drivers/net/i40e/i40e_fdir.c               |  683
-rw-r--r--  drivers/net/i40e/i40e_flow.c               |  519
-rw-r--r--  drivers/net/i40e/i40e_pf.c                 |  130
-rw-r--r--  drivers/net/i40e/i40e_pf.h                 |   60
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c               |  200
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h               |    5
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_altivec.c   |    6
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c      |   27
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c       |    8
-rw-r--r--  drivers/net/i40e/i40e_tm.c                 |  108
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c            |  940
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h            |  346
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e_version.map  |   13
18 files changed, 3723 insertions, 1145 deletions
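
Among other changes, the hunks below register a .dev_reset callback (i40e_dev_reset), reachable from applications through the generic rte_eth_dev_reset() ethdev call; the PMD refuses the reset while SR-IOV is active. A minimal recovery sketch, assuming a single already-configured port; the port id, queue counts and helper name are illustrative and not part of this patch, and rx/tx queue setup is omitted for brevity:

#include <rte_ethdev.h>

/* Recover a port after a reset request (e.g. on RTE_ETH_EVENT_INTR_RESET).
 * rte_eth_dev_reset() ends up in the new i40e_dev_reset(), which uninits
 * and re-inits the PF in the PMD layer; it returns -ENOTSUP while VFs are
 * active on the port.
 */
static int
recover_port(uint16_t port_id, const struct rte_eth_conf *conf)
{
	int ret;

	rte_eth_dev_stop(port_id);

	ret = rte_eth_dev_reset(port_id);
	if (ret != 0)
		return ret;	/* e.g. -ENOTSUP with SR-IOV active */

	/* After the reset the port must be fully reconfigured;
	 * rx/tx queue setup would follow here before restarting.
	 */
	ret = rte_eth_dev_configure(port_id, 1, 1, conf);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}
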
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile index 55c79a60..9ab8c84d 100644 --- a/drivers/net/i40e/Makefile +++ b/drivers/net/i40e/Makefile @@ -39,10 +39,13 @@ LIB = librte_pmd_i40e.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF CFLAGS += -DX722_A0_SUPPORT +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash +LDLIBS += -lrte_bus_pci EXPORT_MAP := rte_pmd_i40e_version.map -LIBABIVER := 1 +LIBABIVER := 2 # # Add extra flags for base driver files (also known as shared code) @@ -78,7 +81,7 @@ endif CFLAGS_i40e_lan_hmc.o += -Wno-error endif -OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) $(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) VPATH += $(SRCDIR)/base diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h index c57ecded..8e5c593c 100644 --- a/drivers/net/i40e/base/i40e_osdep.h +++ b/drivers/net/i40e/base/i40e_osdep.h @@ -35,6 +35,7 @@ #include <string.h> #include <stdint.h> +#include <stdbool.h> #include <stdio.h> #include <stdarg.h> @@ -57,7 +58,6 @@ typedef uint16_t u16; typedef uint32_t u32; typedef int32_t s32; typedef uint64_t u64; -typedef int bool; typedef enum i40e_status_code i40e_status; #define __iomem @@ -99,7 +99,6 @@ typedef enum i40e_status_code i40e_status; #define max(a,b) RTE_MAX(a,b) #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) -#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x") #define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S) #define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 5f26e24a..811cc9ff 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -43,6 +43,7 @@ #include <rte_eal.h> #include <rte_string_fns.h> #include <rte_pci.h> +#include <rte_bus_pci.h> #include <rte_ether.h> #include <rte_ethdev.h> #include <rte_ethdev_pci.h> @@ -65,6 +66,7 @@ #include "i40e_rxtx.h" #include "i40e_pf.h" #include "i40e_regs.h" +#include "rte_pmd_i40e.h" #define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" @@ -86,12 +88,6 @@ /* Flow control default timer */ #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU -/* Flow control default high water */ -#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024) - -/* Flow control default low water */ -#define I40E_DEFAULT_LOW_WATER (0x1A40/1024) - /* Flow control enable fwd bit */ #define I40E_PRTMAC_FWD_CTRL 0x00000001 @@ -101,6 +97,12 @@ /* Kilobytes shift */ #define I40E_KILOSHIFT 10 +/* Flow control default high water */ +#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT) + +/* Flow control default low water */ +#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT) + /* Receive Average Packet Size in Byte*/ #define I40E_PACKET_AVERAGE_SIZE 128 @@ -137,10 +139,6 @@ #define I40E_PRTTSYN_TSYNTYPE 0x0e000000 #define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL -#define I40E_MAX_PERCENT 100 -#define I40E_DEFAULT_DCB_APP_NUM 1 -#define I40E_DEFAULT_DCB_APP_PRIO 3 - /** * Below are values for writing un-exposed registers suggested * by silicon experts @@ -250,13 +248,14 @@ static int i40e_dev_configure(struct rte_eth_dev *dev); static int i40e_dev_start(struct rte_eth_dev *dev); static void i40e_dev_stop(struct rte_eth_dev *dev); static void i40e_dev_close(struct rte_eth_dev 
*dev); +static int i40e_dev_reset(struct rte_eth_dev *dev); static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev); static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev); static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev); static int i40e_dev_set_link_up(struct rte_eth_dev *dev); static int i40e_dev_set_link_down(struct rte_eth_dev *dev); -static void i40e_dev_stats_get(struct rte_eth_dev *dev, +static int i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n); @@ -278,7 +277,7 @@ static int i40e_vlan_filter_set(struct rte_eth_dev *dev, static int i40e_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, uint16_t tpid); -static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); @@ -308,7 +307,6 @@ static int i40e_pf_parameter_init(struct rte_eth_dev *dev); static int i40e_pf_setup(struct i40e_pf *pf); static int i40e_dev_rxtx_init(struct i40e_pf *pf); static int i40e_vmdq_setup(struct rte_eth_dev *dev); -static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); static int i40e_dcb_setup(struct rte_eth_dev *dev); static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg, bool offset_loaded, uint64_t *offset, uint64_t *stat); @@ -360,6 +358,12 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw); static void i40e_configure_registers(struct i40e_hw *hw); static void i40e_hw_init(struct rte_eth_dev *dev); static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); +static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, + uint16_t seid, + uint16_t rule_type, + uint16_t *entries, + uint16_t count, + uint16_t rule_id); static int i40e_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t sw_id, uint8_t on); @@ -449,6 +453,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .dev_start = i40e_dev_start, .dev_stop = i40e_dev_stop, .dev_close = i40e_dev_close, + .dev_reset = i40e_dev_reset, .promiscuous_enable = i40e_dev_promiscuous_enable, .promiscuous_disable = i40e_dev_promiscuous_disable, .allmulticast_enable = i40e_dev_allmulticast_enable, @@ -645,7 +650,8 @@ static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) static struct rte_pci_driver rte_i40e_pmd = { .id_table = pci_id_i40e_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_IOVA_AS_VA, .probe = eth_i40e_pci_probe, .remove = eth_i40e_pci_remove, }; @@ -695,23 +701,22 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci"); static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) { /* - * Initialize registers for flexible payload, which should be set by NVM. - * This should be removed from code once it is fixed in NVM. + * Force global configuration for flexible payload + * to the first 16 bytes of the corresponding L2/L3/L4 paylod. + * This should be removed from code once proper + * configuration API is added to avoid configuration conflicts + * between ports of the same device. 
*/ - I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B); I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0); I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3); I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440); - /* Initialize registers for parsing packet type of QinQ */ + /* + * Initialize registers for parsing packet type of QinQ + * This should be removed from code once proper + * configuration API is added to avoid configuration conflicts + * between ports of the same device. + */ I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029); I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420); } @@ -1034,6 +1039,35 @@ err_fdir_hash_map_alloc: return ret; } +static void +i40e_init_customized_info(struct i40e_pf *pf) +{ + int i; + + /* Initialize customized pctype */ + for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) { + pf->customized_pctype[i].index = i; + pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID; + pf->customized_pctype[i].valid = false; + } + + pf->gtp_support = false; +} + +void +i40e_init_queue_region_conf(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_queue_regions *info = &pf->queue_region; + uint16_t i; + + for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0); + + memset(info, 0, sizeof(struct i40e_queue_regions)); +} + static int eth_i40e_dev_init(struct rte_eth_dev *dev) { @@ -1062,11 +1096,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) return 0; } i40e_set_default_ptype_table(dev); + i40e_set_default_pctype_table(dev); pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); - dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -1299,6 +1333,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize Traffic Manager configuration */ i40e_tm_conf_init(dev); + /* Initialize customized information */ + i40e_init_customized_info(pf); + ret = i40e_init_ethtype_filter_list(dev); if (ret < 0) goto err_init_ethtype_filter_list; @@ -1309,6 +1346,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) if (ret < 0) goto err_init_fdir_filter_list; + /* initialize queue region configuration */ + i40e_init_queue_region_conf(dev); + return 0; err_init_fdir_filter_list: @@ -1594,7 +1634,8 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) static void __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, - int base_queue, int nb_queue) + int base_queue, int nb_queue, + uint16_t itr_idx) { int i; uint32_t val; @@ -1603,7 +1644,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, /* Bind all RX queues to allocated MSIX interrupt */ for (i = 0; i < nb_queue; i++) { val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | - I40E_QINT_RQCTL_ITR_INDX_MASK | + itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT | ((base_queue + i + 1) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | @@ -1666,7 
+1707,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, } void -i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) +i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); @@ -1694,7 +1735,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) /* VF bind interrupt */ if (vsi->type == I40E_VSI_SRIOV) { __vsi_queues_bind_intr(vsi, msix_vect, - vsi->base_queue, vsi->nb_qps); + vsi->base_queue, vsi->nb_qps, + itr_idx); return; } @@ -1720,7 +1762,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) /* no enough msix_vect, map all to one */ __vsi_queues_bind_intr(vsi, msix_vect, vsi->base_queue + i, - vsi->nb_used_qps - i); + vsi->nb_used_qps - i, + itr_idx); for (; !!record && i < vsi->nb_used_qps; i++) intr_handle->intr_vec[queue_idx + i] = msix_vect; @@ -1728,7 +1771,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) } /* 1:1 queue/msix_vect mapping */ __vsi_queues_bind_intr(vsi, msix_vect, - vsi->base_queue + i, 1); + vsi->base_queue + i, 1, + itr_idx); if (!!record) intr_handle->intr_vec[queue_idx + i] = msix_vect; @@ -1918,8 +1962,9 @@ i40e_dev_start(struct rte_eth_dev *dev) hw->adapter_stopped = 0; if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { - PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled", - dev->data->port_id); + PMD_INIT_LOG(ERR, + "Invalid link_speeds for port %u, autonegotiation disabled", + dev->data->port_id); return -EINVAL; } @@ -1957,19 +2002,21 @@ i40e_dev_start(struct rte_eth_dev *dev) /* Map queues with MSIX interrupt */ main_vsi->nb_used_qps = dev->data->nb_rx_queues - pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(main_vsi); + i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(main_vsi); /* Map VMDQ VSI queues with MSIX interrupt */ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi); + i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, + I40E_ITR_INDEX_DEFAULT); i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); } /* enable FDIR MSIX interrupt */ if (pf->fdir.fdir_vsi) { - i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); + i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi, + I40E_ITR_INDEX_NONE); i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); } @@ -2063,7 +2110,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; - struct i40e_mirror_rule *p_mirror; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int i; @@ -2092,13 +2138,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) /* Set link down */ i40e_dev_set_link_down(dev); - /* Remove all mirror rules */ - while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { - TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); - rte_free(p_mirror); - } - pf->nb_mirror_rule = 0; - if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ rte_intr_callback_register(intr_handle, @@ -2115,6 +2154,9 @@ i40e_dev_stop(struct rte_eth_dev *dev) /* reset hierarchy commit */ pf->tm_conf.committed = false; + /* Remove all the queue region configuration */ + i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + hw->adapter_stopped = 1; } @@ -2125,12 +2167,34 @@ 
i40e_dev_close(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_mirror_rule *p_mirror; uint32_t reg; int i; + int ret; PMD_INIT_FUNC_TRACE(); i40e_dev_stop(dev); + + /* Remove all mirror rules */ + while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { + ret = i40e_aq_del_mirror_rule(hw, + pf->main_vsi->veb->seid, + p_mirror->rule_type, + p_mirror->entries, + p_mirror->num_entries, + p_mirror->id); + if (ret < 0) + PMD_DRV_LOG(ERR, "failed to remove mirror rule: " + "status = %d, aq_err = %d.", ret, + hw->aq.asq_last_status); + + /* remove mirror software resource anyway */ + TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); + rte_free(p_mirror); + pf->nb_mirror_rule--; + } + i40e_dev_free_queues(dev); /* Disable interrupt */ @@ -2165,6 +2229,32 @@ i40e_dev_close(struct rte_eth_dev *dev) I40E_WRITE_FLUSH(hw); } +/* + * Reset PF device only to re-initialize resources in PMD layer + */ +static int +i40e_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific. As to i40e PF, it is rather complex. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. It might be supported later. + */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_i40e_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_i40e_dev_init(dev); + + return ret; +} + static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) { @@ -2653,7 +2743,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) } /* Get all statistics of a port */ -static void +static int i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2664,13 +2754,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* call read registers - updates values, now write them to struct */ i40e_read_stats_registers(pf, hw); - stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + - pf->main_vsi->eth_stats.rx_multicast + - pf->main_vsi->eth_stats.rx_broadcast - + stats->ipackets = ns->eth.rx_unicast + + ns->eth.rx_multicast + + ns->eth.rx_broadcast - + ns->eth.rx_discards - pf->main_vsi->eth_stats.rx_discards; - stats->opackets = pf->main_vsi->eth_stats.tx_unicast + - pf->main_vsi->eth_stats.tx_multicast + - pf->main_vsi->eth_stats.tx_broadcast; + stats->opackets = ns->eth.tx_unicast + + ns->eth.tx_multicast + + ns->eth.tx_broadcast; stats->ibytes = ns->eth.rx_bytes; stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + @@ -2752,6 +2843,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) ns->checksum_error); PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match); PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************"); + return 0; } /* Reset the statistics */ @@ -2965,7 +3057,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); dev_info->reta_size = pf->hash_lut_size; - dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3129,7 
+3221,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, return ret; } -static void +static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -3162,6 +3254,8 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) else i40e_vsi_config_double_vlan(vsi, FALSE); } + + return 0; } static void @@ -3225,6 +3319,13 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); fc_conf->pause_time = pf->fc_conf.pause_time; + + /* read out from register, in case they are modified by other port */ + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT; + fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]; fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]; @@ -3400,7 +3501,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev, return -EINVAL; } - (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); + rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); if (dev->data->dev_conf.rxmode.hw_vlan_filter) mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; else @@ -3505,10 +3606,10 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } if (add) { - (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); - (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes, + rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); + rte_memcpy(hw->mac.addr, new_mac->addr_bytes, ETHER_ADDR_LEN); - (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, + rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, ETHER_ADDR_LEN); mac_filter.filter_type = filter->filter_type; @@ -3519,7 +3620,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf, } ether_addr_copy(new_mac, &pf->dev_addr); } else { - (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, + rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); if (ret != I40E_SUCCESS) { @@ -3741,7 +3842,7 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, mem->size = size; mem->va = mz->addr; - mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); + mem->pa = mz->iova; mem->zone = (const void *)mz; PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: %"PRIu64, @@ -4311,7 +4412,7 @@ i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, vsi->info.valid_sections = rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -4350,7 +4451,7 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap) return ret; } - (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, + rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, sizeof(vsi->info.qs_handle)); return I40E_SUCCESS; } @@ -4607,7 +4708,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) if (vsi->type != I40E_VSI_MAIN) return I40E_ERR_CONFIG; memset(&def_filter, 0, sizeof(def_filter)); - (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, + rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, ETH_ADDR_LEN); def_filter.vlan_tag = 0; def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | @@ -4626,7 +4727,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return 
I40E_ERR_NO_MEMORY; } mac = &f->mac_info.mac_addr; - (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, + rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -4634,7 +4735,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return ret; } - (void)rte_memcpy(&filter.mac_addr, + rte_memcpy(&filter.mac_addr, (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; return i40e_vsi_add_mac(vsi, &filter); @@ -4895,7 +4996,7 @@ i40e_vsi_setup(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "Failed to get VSI params"); goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info, &ctxt.info, + rte_memcpy(&vsi->info, &ctxt.info, sizeof(struct i40e_aqc_vsi_properties_data)); vsi->vsi_id = ctxt.vsi_number; vsi->info.valid_sections = 0; @@ -4913,7 +5014,7 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; - (void)rte_memcpy(&ctxt.info, &vsi->info, + rte_memcpy(&ctxt.info, &vsi->info, sizeof(struct i40e_aqc_vsi_properties_data)); ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); @@ -4934,15 +5035,15 @@ i40e_vsi_setup(struct i40e_pf *pf, goto fail_msix_alloc; } - (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, sizeof(vsi->info.tc_mapping)); - (void)rte_memcpy(&vsi->info.queue_mapping, + rte_memcpy(&vsi->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(vsi->info.queue_mapping)); vsi->info.mapping_flags = ctxt.info.mapping_flags; vsi->info.valid_sections = 0; - (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, + rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, ETH_ADDR_LEN); /** @@ -5085,7 +5186,7 @@ i40e_vsi_setup(struct i40e_pf *pf, } /* MAC/VLAN configuration */ - (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); @@ -5197,7 +5298,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK); vsi->info.port_vlan_flags |= vlan_flags; ctxt.seid = vsi->seid; - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", @@ -5215,7 +5316,11 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev) /* Apply vlan offload setting */ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; - i40e_vlan_offload_set(dev, mask); + ret = i40e_vlan_offload_set(dev, mask); + if (ret) { + PMD_DRV_LOG(INFO, "Failed to update vlan offload"); + return ret; + } /* Apply double-vlan setting, not implemented yet */ @@ -5991,7 +6096,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, &filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -6066,7 +6171,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, memset(req_list, 0, ele_buff_size); for (i = 0; i < actual_num; i++) { - (void)rte_memcpy(req_list[i].mac_addr, + rte_memcpy(req_list[i].mac_addr, 
&filter[num + i].macaddr, ETH_ADDR_LEN); req_list[i].vlan_tag = rte_cpu_to_le_16(filter[num + i].vlan_id); @@ -6217,7 +6322,7 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, "vlan number doesn't match"); return I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, addr, ETH_ADDR_LEN); mv_f[i].vlan_id = j * I40E_UINT32_BIT_SIZE + k; @@ -6246,7 +6351,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, PMD_DRV_LOG(ERR, "buffer number not match"); return I40E_ERR_PARAM; } - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); mv_f[i].vlan_id = vlan; mv_f[i].filter_type = f->mac_info.filter_type; @@ -6282,7 +6387,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) i = 0; if (vsi->vlan_num == 0) { TAILQ_FOREACH(f, &vsi->mac_list, next) { - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); mv_f[i].filter_type = f->mac_info.filter_type; mv_f[i].vlan_id = 0; @@ -6452,7 +6557,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = mac_filter->filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, + rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, ETH_ADDR_LEN); } @@ -6475,7 +6580,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) ret = I40E_ERR_NO_MEMORY; goto DONE; } - (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, + rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, ETH_ADDR_LEN); f->mac_info.filter_type = mac_filter->filter_type; TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); @@ -6522,7 +6627,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } if (filter_type == RTE_MACVLAN_PERFECT_MATCH || @@ -6549,104 +6654,36 @@ DONE: /* Configure hash enable flags for RSS */ uint64_t -i40e_config_hena(uint64_t flags, enum i40e_mac_type type) +i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags) { uint64_t hena = 0; + int i; if (!flags) return hena; - if (flags & ETH_RSS_FRAG_IPV4) - hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4; - if (flags & ETH_RSS_NONFRAG_IPV4_TCP) { - if (type == I40E_MAC_X722) { - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - } else - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - } - if (flags & ETH_RSS_NONFRAG_IPV4_UDP) { - if (type == I40E_MAC_X722) { - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - } else - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - } - if (flags & ETH_RSS_NONFRAG_IPV4_SCTP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; - if (flags & ETH_RSS_NONFRAG_IPV4_OTHER) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; - if (flags & ETH_RSS_FRAG_IPV6) - hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6; - if (flags & ETH_RSS_NONFRAG_IPV6_TCP) { - if (type == I40E_MAC_X722) { - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - } else - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - } - if (flags & ETH_RSS_NONFRAG_IPV6_UDP) { - if (type == I40E_MAC_X722) 
{ - hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - } else - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) { + if (flags & (1ULL << i)) + hena |= adapter->pctypes_tbl[i]; } - if (flags & ETH_RSS_NONFRAG_IPV6_SCTP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; - if (flags & ETH_RSS_NONFRAG_IPV6_OTHER) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; - if (flags & ETH_RSS_L2_PAYLOAD) - hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD; return hena; } /* Parse the hash enable flags */ uint64_t -i40e_parse_hena(uint64_t flags) +i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags) { uint64_t rss_hf = 0; if (!flags) return rss_hf; - if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4)) - rss_hf |= ETH_RSS_FRAG_IPV4; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)) - rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER; - if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6)) - rss_hf |= ETH_RSS_FRAG_IPV6; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP; - if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER)) - rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER; - if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD)) - rss_hf |= ETH_RSS_L2_PAYLOAD; + int i; + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) { + if (flags & adapter->pctypes_tbl[i]) + rss_hf |= (1ULL << i); + } return rss_hf; } @@ -6655,16 +6692,9 @@ static void i40e_pf_disable_rss(struct i40e_pf *pf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint64_t hena; - hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - if (hw->mac.type == I40E_MAC_X722) - hena &= ~I40E_RSS_HENA_ALL_X722; - else - hena &= ~I40E_RSS_HENA_ALL; - i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); - i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); I40E_WRITE_FLUSH(hw); } @@ -6736,7 +6766,6 @@ static int i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint64_t rss_hf; uint64_t hena; int ret; @@ -6745,14 +6774,7 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, 
struct rte_eth_rss_conf *rss_conf) if (ret) return ret; - rss_hf = rss_conf->rss_hf; - hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - if (hw->mac.type == I40E_MAC_X722) - hena &= ~I40E_RSS_HENA_ALL_X722; - else - hena &= ~I40E_RSS_HENA_ALL; - hena |= i40e_config_hena(rss_hf, hw->mac.type); + hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); @@ -6766,14 +6788,13 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev, { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; + uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask; uint64_t hena; hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - if (!(hena & ((hw->mac.type == I40E_MAC_X722) - ? I40E_RSS_HENA_ALL_X722 - : I40E_RSS_HENA_ALL))) { /* RSS disabled */ + + if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -EINVAL; return 0; /* Nothing to do */ @@ -6798,7 +6819,7 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - rss_conf->rss_hf = i40e_parse_hena(hena); + rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena); return 0; } @@ -7071,7 +7092,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) /* create L1 filter */ filter_replace.old_filter_type = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; - filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11; filter_replace.tr_bit = 0; /* Prepare the buffer, 3 entries */ @@ -7119,12 +7140,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) I40E_AQC_MIRROR_CLOUD_FILTER; filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; filter_replace.new_filter_type = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + I40E_AQC_ADD_CLOUD_FILTER_0X11; /* Prepare the buffer, 2 entries */ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; filter_replace_buf.data[0] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; - filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11; filter_replace_buf.data[4] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; status = i40e_aq_replace_cloud_filters(hw, &filter_replace, @@ -7142,12 +7163,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) I40E_AQC_MIRROR_CLOUD_FILTER; filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC; filter_replace.new_filter_type = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + I40E_AQC_ADD_CLOUD_FILTER_0X12; /* Prepare the buffer, 2 entries */ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; filter_replace_buf.data[0] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; - filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; +} + 
+static enum i40e_status_code +i40e_replace_gtp_l1_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_status_code status = I40E_SUCCESS; + + /* For GTP-C */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12; + filter_replace.tr_bit = I40E_AQC_NEW_TR_22 | + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[6] = 0xFF; + filter_replace_buf.data[7] = 0xFF; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + + /* for GTP-U */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13; + filter_replace.tr_bit = I40E_AQC_NEW_TR_21 | + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[6] = 0xFF; + filter_replace_buf.data[7] = 0xFF; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + return status; +} + +static enum +i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_status_code status = I40E_SUCCESS; + + /* for GTP-C */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + 
&filter_replace_buf); + if (status < 0) + return status; + + /* for GTP-U */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X12; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; filter_replace_buf.data[4] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; @@ -7238,7 +7378,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = 0x40; big_buffer = 1; - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP; break; case I40E_TUNNEL_TYPE_MPLSoGRE: if (!pf->mpls_replace_flag) { @@ -7254,7 +7394,37 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = 0x0; big_buffer = 1; - tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE; + break; + case I40E_TUNNEL_TYPE_GTPC: + if (!pf->gtp_replace_flag) { + i40e_replace_gtp_l1_filter(pf); + i40e_replace_gtp_cloud_filter(pf); + pf->gtp_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] = + (teid_le >> 16) & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] = + teid_le & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] = + 0x0; + big_buffer = 1; + break; + case I40E_TUNNEL_TYPE_GTPU: + if (!pf->gtp_replace_flag) { + i40e_replace_gtp_l1_filter(pf); + i40e_replace_gtp_cloud_filter(pf); + pf->gtp_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] = + (teid_le >> 16) & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] = + teid_le & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] = + 0x0; + big_buffer = 1; break; case I40E_TUNNEL_TYPE_QINQ: if (!pf->qinq_replace_flag) { @@ -7282,13 +7452,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP) pfilter->element.flags = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP; + I40E_AQC_ADD_CLOUD_FILTER_0X11; else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE) pfilter->element.flags = - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE; + I40E_AQC_ADD_CLOUD_FILTER_0X12; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X12; else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ) pfilter->element.flags |= - I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + I40E_AQC_ADD_CLOUD_FILTER_0X10; else { val = i40e_dev_get_filter_type(tunnel_filter->filter_type, &pfilter->element.flags); @@ -7573,7 +7749,7 @@ i40e_pf_config_rss(struct i40e_pf *pf) } rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; - if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) { + if 
((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { i40e_pf_disable_rss(pf); return 0; } @@ -7794,9 +7970,9 @@ static int i40e_get_hash_filter_global_config(struct i40e_hw *hw, struct rte_eth_hash_global_conf *g_cfg) { - uint32_t reg, mask = I40E_FLOW_TYPES; - uint16_t i; - enum i40e_filter_pctype pctype; + struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; + uint32_t reg; + uint16_t i, j; memset(g_cfg, 0, sizeof(*g_cfg)); reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); @@ -7807,29 +7983,38 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, PMD_DRV_LOG(DEBUG, "Hash function is %s", (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR"); - for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) { - if (!(mask & (1UL << i))) - continue; - mask &= ~(1UL << i); - /* Bit set indicats the coresponding flow type is supported */ - g_cfg->valid_bit_mask[0] |= (1UL << i); - /* if flowtype is invalid, continue */ - if (!I40E_VALID_FLOW(i)) + /* + * We work only with lowest 32 bits which is not correct, but to work + * properly the valid_bit_mask size should be increased up to 64 bits + * and this will brake ABI. This modification will be done in next + * release + */ + g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask; + + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) { + if (!adapter->pctypes_tbl[i]) continue; - pctype = i40e_flowtype_to_pctype(i); - reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype)); - if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) - g_cfg->sym_hash_enable_mask[0] |= (1UL << i); + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (adapter->pctypes_tbl[i] & (1ULL << j)) { + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j)); + if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) { + g_cfg->sym_hash_enable_mask[0] |= + (1UL << i); + } + } + } } return 0; } static int -i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg) +i40e_hash_global_config_check(const struct i40e_adapter *adapter, + const struct rte_eth_hash_global_conf *g_cfg) { uint32_t i; - uint32_t mask0, i40e_mask = I40E_FLOW_TYPES; + uint32_t mask0, i40e_mask = adapter->flow_types_mask; if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && @@ -7872,64 +8057,36 @@ static int i40e_set_hash_filter_global_config(struct i40e_hw *hw, struct rte_eth_hash_global_conf *g_cfg) { + struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; int ret; - uint16_t i; + uint16_t i, j; uint32_t reg; - uint32_t mask0 = g_cfg->valid_bit_mask[0]; - enum i40e_filter_pctype pctype; + /* + * We work only with lowest 32 bits which is not correct, but to work + * properly the valid_bit_mask size should be increased up to 64 bits + * and this will brake ABI. This modification will be done in next + * release + */ + uint32_t mask0 = g_cfg->valid_bit_mask[0] & + (uint32_t)adapter->flow_types_mask; /* Check the input parameters */ - ret = i40e_hash_global_config_check(g_cfg); + ret = i40e_hash_global_config_check(adapter, g_cfg); if (ret < 0) return ret; - for (i = 0; mask0 && i < UINT32_BIT; i++) { - if (!(mask0 & (1UL << i))) - continue; - mask0 &= ~(1UL << i); - /* if flowtype is invalid, continue */ - if (!I40E_VALID_FLOW(i)) - continue; - pctype = i40e_flowtype_to_pctype(i); - reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? 
- I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - if (hw->mac.type == I40E_MAC_X722) { - if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP), - reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP), - reg); - } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK), - reg); - } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP), - reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP), - reg); - } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg); - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM( - I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK), - reg); - } else { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), - reg); + for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) { + if (mask0 & (1UL << i)) { + reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? + I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; + + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (adapter->pctypes_tbl[i] & (1ULL << j)) + i40e_write_rx_ctl(hw, + I40E_GLQF_HSYM(j), + reg); } - } else { - i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); } } @@ -8551,16 +8708,14 @@ i40e_filter_input_set_init(struct i40e_pf *pf) uint64_t input_set, inset_reg; uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; int num, i; + uint16_t flow_type; for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { - if (hw->mac.type == I40E_MAC_X722) { - if (!I40E_VALID_PCTYPE_X722(pctype)) - continue; - } else { - if (!I40E_VALID_PCTYPE(pctype)) - continue; - } + flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) + continue; input_set = i40e_get_default_input_set(pctype); @@ -8623,7 +8778,8 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, return -EINVAL; } - if (!I40E_VALID_FLOW(conf->flow_type)) { + pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } @@ -8631,10 +8787,8 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, if (hw->mac.type == I40E_MAC_X722) { /* get translated pctype value in fd pctype register */ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw, - I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype( - conf->flow_type))); - } else - pctype = i40e_flowtype_to_pctype(conf->flow_type); + I40E_GLQF_FD_PCTYPES((int)pctype)); + } ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); @@ -8642,11 +8796,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, PMD_DRV_LOG(ERR, "Failed to parse input set"); return -EINVAL; } - if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH, - input_set) != 0) { - PMD_DRV_LOG(ERR, "Invalid input set"); - return -EINVAL; - } + if (conf->op == RTE_ETH_INPUT_SET_ADD) { /* get inset value in register */ inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); @@ -8700,24 +8850,19 @@ 
i40e_fdir_filter_inset_select(struct i40e_pf *pf, return -EINVAL; } - if (!I40E_VALID_FLOW(conf->flow_type)) { + pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); + + if (pctype == I40E_FILTER_PCTYPE_INVALID) { PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } - pctype = i40e_flowtype_to_pctype(conf->flow_type); - ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); if (ret) { PMD_DRV_LOG(ERR, "Failed to parse input set"); return -EINVAL; } - if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, - input_set) != 0) { - PMD_DRV_LOG(ERR, "Invalid input set"); - return -EINVAL; - } /* get inset value in register */ inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); @@ -9156,72 +9301,42 @@ i40e_hw_init(struct rte_eth_dev *dev) i40e_set_symmetric_hash_enable_per_port(hw, 0); } +/* + * For X722 it is possible to have multiple pctypes mapped to the same flowtype + * however this function will return only one highest pctype index, + * which is not quite correct. This is known problem of i40e driver + * and needs to be fixed later. + */ enum i40e_filter_pctype -i40e_flowtype_to_pctype(uint16_t flow_type) -{ - static const enum i40e_filter_pctype pctype_table[] = { - [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4, - [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = - I40E_FILTER_PCTYPE_NONF_IPV4_UDP, - [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = - I40E_FILTER_PCTYPE_NONF_IPV4_TCP, - [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, - [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = - I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, - [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6, - [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = - I40E_FILTER_PCTYPE_NONF_IPV6_UDP, - [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = - I40E_FILTER_PCTYPE_NONF_IPV6_TCP, - [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, - [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = - I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, - [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD, - }; +i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type) +{ + int i; + uint64_t pctype_mask; - return pctype_table[flow_type]; + if (flow_type < I40E_FLOW_TYPE_MAX) { + pctype_mask = adapter->pctypes_tbl[flow_type]; + for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) { + if (pctype_mask & (1ULL << i)) + return (enum i40e_filter_pctype)i; + } + } + return I40E_FILTER_PCTYPE_INVALID; } uint16_t -i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) +i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, + enum i40e_filter_pctype pctype) { - static const uint16_t flowtype_table[] = { - [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4, - [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV4_UDP, - [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV4_UDP, - [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV4_UDP, - [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = - RTE_ETH_FLOW_NONFRAG_IPV4_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = - RTE_ETH_FLOW_NONFRAG_IPV4_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = - RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, - [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = - RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, - [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6, - [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV6_UDP, - [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV6_UDP, - [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = - RTE_ETH_FLOW_NONFRAG_IPV6_UDP, - 
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = - RTE_ETH_FLOW_NONFRAG_IPV6_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = - RTE_ETH_FLOW_NONFRAG_IPV6_TCP, - [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = - RTE_ETH_FLOW_NONFRAG_IPV6_SCTP, - [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = - RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, - [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD, - }; + uint16_t flowtype; + uint64_t pctype_mask = 1ULL << pctype; + + for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX; + flowtype++) { + if (adapter->pctypes_tbl[flowtype] & pctype_mask) + return flowtype; + } - return flowtype_table[pctype]; + return RTE_ETH_FLOW_UNKNOWN; } /* @@ -9238,7 +9353,7 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) /* For both X710 and XL710 */ #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200 -#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200 #define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 @@ -10248,9 +10363,9 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) goto out; } /* update the local VSI info with updated queue map */ - (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, sizeof(vsi->info.tc_mapping)); - (void)rte_memcpy(&vsi->info.queue_mapping, + rte_memcpy(&vsi->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(vsi->info.queue_mapping)); vsi->info.mapping_flags = ctxt.info.mapping_flags; @@ -10377,7 +10492,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, * * Returns 0 on success, negative value on failure */ -static int +int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10809,14 +10924,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) sizeof(f->input.general_fields)); if (((f->input.flags & - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) == - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) || + I40E_AQC_ADD_CLOUD_FILTER_0X11) == + I40E_AQC_ADD_CLOUD_FILTER_0X11) || ((f->input.flags & - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) == - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) || + I40E_AQC_ADD_CLOUD_FILTER_0X12) == + I40E_AQC_ADD_CLOUD_FILTER_0X12) || ((f->input.flags & - I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) == - I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ)) + I40E_AQC_ADD_CLOUD_FILTER_0X10) == + I40E_AQC_ADD_CLOUD_FILTER_0X10)) big_buffer = 1; if (big_buffer) @@ -10851,6 +10966,301 @@ is_i40e_supported(struct rte_eth_dev *dev) return is_device_supported(dev, &rte_i40e_pmd); } +struct i40e_customized_pctype* +i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index) +{ + int i; + + for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) { + if (pf->customized_pctype[i].index == index) + return &pf->customized_pctype[i]; + } + return NULL; +} + +static int +i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint32_t pctype_num; + struct rte_pmd_i40e_ptype_info *pctype; + uint32_t buff_size; + struct i40e_customized_pctype *new_pctype = NULL; + uint8_t proto_id; + uint8_t pctype_value; + char name[64]; + uint32_t i, j, n; + int ret; + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&pctype_num, sizeof(pctype_num), + RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get pctype number"); + return 
-1; + } + if (!pctype_num) { + PMD_DRV_LOG(INFO, "No new pctype added"); + return -1; + } + + buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info); + pctype = rte_zmalloc("new_pctype", buff_size, 0); + if (!pctype) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return -1; + } + /* get information about new pctype list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)pctype, buff_size, + RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get pctype list"); + rte_free(pctype); + return -1; + } + + /* Update customized pctype. */ + for (i = 0; i < pctype_num; i++) { + pctype_value = pctype[i].ptype_id; + memset(name, 0, sizeof(name)); + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = pctype[i].protocols[j]; + if (proto_id == RTE_PMD_I40E_PROTO_UNUSED) + continue; + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id != proto_id) + continue; + strcat(name, proto[n].name); + strcat(name, "_"); + break; + } + } + name[strlen(name) - 1] = '\0'; + if (!strcmp(name, "GTPC")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPC); + else if (!strcmp(name, "GTPU_IPV4")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV4); + else if (!strcmp(name, "GTPU_IPV6")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV6); + else if (!strcmp(name, "GTPU")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU); + if (new_pctype) { + new_pctype->pctype = pctype_value; + new_pctype->valid = true; + } + } + + rte_free(pctype); + return 0; +} + +static int +i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto) +{ + struct rte_pmd_i40e_ptype_mapping *ptype_mapping; + uint16_t port_id = dev->data->port_id; + uint32_t ptype_num; + struct rte_pmd_i40e_ptype_info *ptype; + uint32_t buff_size; + uint8_t proto_id; + char name[RTE_PMD_I40E_DDP_NAME_SIZE]; + uint32_t i, j, n; + bool inner_ip; + int ret; + + /* get information about new ptype num */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&ptype_num, sizeof(ptype_num), + RTE_PMD_I40E_PKG_INFO_PTYPE_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get ptype number"); + return ret; + } + if (!ptype_num) { + PMD_DRV_LOG(INFO, "No new ptype added"); + return -1; + } + + buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info); + ptype = rte_zmalloc("new_ptype", buff_size, 0); + if (!ptype) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return -1; + } + + /* get information about new ptype list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)ptype, buff_size, + RTE_PMD_I40E_PKG_INFO_PTYPE_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get ptype list"); + rte_free(ptype); + return ret; + } + + buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping); + ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0); + if (!ptype_mapping) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + rte_free(ptype); + return -1; + } + + /* Update ptype mapping table. 
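Each hardware ptype exported by the profile is translated into an rte_mbuf packet type mask assembled from RTE_PTYPE_* flags according to the protocol names it carries, and the result is pushed to the driver through rte_pmd_i40e_ptype_mapping_update(). For instance, a GTP-U ptype whose protocol list reads IPV4, UDP, GTPU, IPV4, UDP would end up as RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GTPU | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP (an illustrative combination inferred from the name matching below, not one taken verbatim from a profile).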
*/ + for (i = 0; i < ptype_num; i++) { + ptype_mapping[i].hw_ptype = ptype[i].ptype_id; + ptype_mapping[i].sw_ptype = 0; + inner_ip = false; + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = ptype[i].protocols[j]; + if (proto_id == RTE_PMD_I40E_PROTO_UNUSED) + continue; + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id != proto_id) + continue; + memset(name, 0, sizeof(name)); + strcpy(name, proto[n].name); + if (!strncmp(name, "IPV4", 4) && !inner_ip) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + inner_ip = true; + } else if (!strncmp(name, "IPV4FRAG", 8) && + inner_ip) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_FRAG; + } else if (!strncmp(name, "IPV4", 4) && + inner_ip) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + else if (!strncmp(name, "IPV6", 4) && + !inner_ip) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + inner_ip = true; + } else if (!strncmp(name, "IPV6FRAG", 8) && + inner_ip) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_FRAG; + } else if (!strncmp(name, "IPV6", 4) && + inner_ip) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + else if (!strncmp(name, "GTPC", 4)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPC; + else if (!strncmp(name, "GTPU", 4)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPU; + else if (!strncmp(name, "UDP", 3)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_UDP; + else if (!strncmp(name, "TCP", 3)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_TCP; + else if (!strncmp(name, "SCTP", 4)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_SCTP; + else if (!strncmp(name, "ICMP", 4) || + !strncmp(name, "ICMPV6", 6)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_ICMP; + + break; + } + } + } + + ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping, + ptype_num, 0); + if (ret) + PMD_DRV_LOG(ERR, "Failed to update mapping table."); + + rte_free(ptype_mapping); + rte_free(ptype); + return ret; +} + +void +i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint32_t proto_num; + struct rte_pmd_i40e_proto_info *proto; + uint32_t buff_size; + uint32_t i; + int ret; + + /* get information about protocol number */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&proto_num, sizeof(proto_num), + RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get protocol number"); + return; + } + if (!proto_num) { + PMD_DRV_LOG(INFO, "No new protocol added"); + return; + } + + buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info); + proto = rte_zmalloc("new_proto", buff_size, 0); + if (!proto) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return; + } + + /* get information about protocol list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)proto, buff_size, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get protocol list"); + rte_free(proto); + return; + } + + /* Check if GTP is supported. 
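A DDP profile only advertises its protocols by name, so a prefix match is used: strncmp(proto[i].name, "GTP", 3) == 0 holds for both the "GTPC" and the "GTPU" entries, which is enough to flag GTP support before the customized pctype and ptype tables are refreshed below.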
*/ + for (i = 0; i < proto_num; i++) { + if (!strncmp(proto[i].name, "GTP", 3)) { + pf->gtp_support = true; + break; + } + } + + /* Update customized pctype info */ + ret = i40e_update_customized_pctype(dev, pkg, pkg_size, + proto_num, proto); + if (ret) + PMD_DRV_LOG(INFO, "No pctype is updated."); + + /* Update customized ptype info */ + ret = i40e_update_customized_ptype(dev, pkg, pkg_size, + proto_num, proto); + if (ret) + PMD_DRV_LOG(INFO, "No ptype is updated."); + + rte_free(proto); +} + /* Create a QinQ cloud filter * * The Fortville NIC has limited resources for tunnel filters, @@ -10911,7 +11321,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) /* create L1 filter */ filter_replace.old_filter_type = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN; - filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10; filter_replace.tr_bit = 0; /* Prepare the buffer, 2 entries */ @@ -10942,13 +11352,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) /* create L2 filter, input for L2 filter will be L1 filter */ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP; - filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10; /* Prepare the buffer, 2 entries */ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; filter_replace_buf.data[0] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; - filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10; filter_replace_buf.data[4] |= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 48abc05a..cd67453d 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -186,9 +186,9 @@ enum i40e_flxpld_layer_idx { /* Default queue interrupt throttling time in microseconds */ #define I40E_ITR_INDEX_DEFAULT 0 +#define I40E_ITR_INDEX_NONE 3 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ - /* Special FW support this floating VEB feature */ #define FLOATING_VEB_SUPPORTED_FW_MAJ 5 #define FLOATING_VEB_SUPPORTED_FW_MIN 0 @@ -260,6 +260,12 @@ enum i40e_flxpld_layer_idx { #define I40E_QOS_BW_WEIGHT_MIN 1 /* The max bandwidth weight is 127. */ #define I40E_QOS_BW_WEIGHT_MAX 127 +/* The max queue region index is 7. */ +#define I40E_REGION_MAX_INDEX 7 + +#define I40E_MAX_PERCENT 100 +#define I40E_DEFAULT_DCB_APP_NUM 1 +#define I40E_DEFAULT_DCB_APP_PRIO 3 /** * The overhead from MTU to max frame size. @@ -460,6 +466,119 @@ struct i40e_vmdq_info { #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off)) #define I40E_FDIR_IPv6_TC_OFFSET 20 +/* A structure used to define the input for GTP flow */ +struct i40e_gtp_flow { + struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */ + uint8_t msg_type; /* Message type. */ + uint32_t teid; /* TEID in big endian. 
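A caller filling this field is expected to convert from host order first, e.g. teid = rte_cpu_to_be_32(0x12345678) for TEID 0x12345678 (illustrative value).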
*/ +}; + +/* A structure used to define the input for GTP IPV4 flow */ +struct i40e_gtp_ipv4_flow { + struct i40e_gtp_flow gtp; + struct rte_eth_ipv4_flow ip4; +}; + +/* A structure used to define the input for GTP IPV6 flow */ +struct i40e_gtp_ipv6_flow { + struct i40e_gtp_flow gtp; + struct rte_eth_ipv6_flow ip6; +}; + +/* A structure used to define the input for raw type flow */ +struct i40e_raw_flow { + uint16_t pctype; + void *packet; + uint32_t length; +}; + +/* + * A union contains the inputs for all types of flow + * items in flows need to be in big endian + */ +union i40e_fdir_flow { + struct rte_eth_l2_flow l2_flow; + struct rte_eth_udpv4_flow udp4_flow; + struct rte_eth_tcpv4_flow tcp4_flow; + struct rte_eth_sctpv4_flow sctp4_flow; + struct rte_eth_ipv4_flow ip4_flow; + struct rte_eth_udpv6_flow udp6_flow; + struct rte_eth_tcpv6_flow tcp6_flow; + struct rte_eth_sctpv6_flow sctp6_flow; + struct rte_eth_ipv6_flow ipv6_flow; + struct i40e_gtp_flow gtp_flow; + struct i40e_gtp_ipv4_flow gtp_ipv4_flow; + struct i40e_gtp_ipv6_flow gtp_ipv6_flow; + struct i40e_raw_flow raw_flow; +}; + +enum i40e_fdir_ip_type { + I40E_FDIR_IPTYPE_IPV4, + I40E_FDIR_IPTYPE_IPV6, +}; + +/* A structure used to contain extend input of flow */ +struct i40e_fdir_flow_ext { + uint16_t vlan_tci; + uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN]; + /* It is filled by the flexible payload to match. */ + uint8_t is_vf; /* 1 for VF, 0 for port dev */ + uint16_t dst_id; /* VF ID, available when is_vf is 1*/ + bool inner_ip; /* If there is inner ip */ + enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */ + bool customized_pctype; /* If customized pctype is used */ + bool pkt_template; /* If raw packet template is used */ +}; + +/* A structure used to define the input for a flow director filter entry */ +struct i40e_fdir_input { + enum i40e_filter_pctype pctype; + union i40e_fdir_flow flow; + /* Flow fields to match, dependent on flow_type */ + struct i40e_fdir_flow_ext flow_ext; + /* Additional fields to match */ +}; + +/* Behavior will be taken if FDIR match */ +enum i40e_fdir_behavior { + I40E_FDIR_ACCEPT = 0, + I40E_FDIR_REJECT, + I40E_FDIR_PASSTHRU, +}; + +/* Flow director report status + * It defines what will be reported if FDIR entry is matched. + */ +enum i40e_fdir_status { + I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */ + I40E_FDIR_REPORT_ID, /* Only report FD ID. */ + I40E_FDIR_REPORT_ID_FLEX_4, /* Report FD ID and 4 flex bytes. */ + I40E_FDIR_REPORT_FLEX_8, /* Report 8 flex bytes. */ +}; + +/* A structure used to define an action when match FDIR packet filter. */ +struct i40e_fdir_action { + uint16_t rx_queue; /* Queue assigned to if FDIR match. */ + enum i40e_fdir_behavior behavior; /* Behavior will be taken */ + enum i40e_fdir_status report_status; /* Status report option */ + /* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or + * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported + * flex bytes start from in flexible payload. + */ + uint8_t flex_off; +}; + +/* A structure used to define the flow director filter entry by filter_ctrl API + * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and + * RTE_ETH_FILTER_DELETE operations. + */ +struct i40e_fdir_filter_conf { + uint32_t soft_id; + /* ID, an unique value is required when deal with FDIR entry */ + struct i40e_fdir_input input; /* Input set */ + struct i40e_fdir_action action; /* Action taken when match */ +}; + /* * Structure to store flex pit for flow diretor. 
*/ @@ -478,12 +597,13 @@ struct i40e_fdir_flex_mask { } bitmask[I40E_FDIR_BITMASK_NUM_WORD]; }; -#define I40E_FILTER_PCTYPE_MAX 64 -#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8) +#define I40E_FILTER_PCTYPE_INVALID 0 +#define I40E_FILTER_PCTYPE_MAX 64 +#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8) struct i40e_fdir_filter { TAILQ_ENTRY(i40e_fdir_filter) rules; - struct rte_eth_fdir_filter fdir; + struct i40e_fdir_filter_conf fdir; }; TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter); @@ -541,17 +661,49 @@ struct i40e_ethertype_rule { struct rte_hash *hash_table; }; +/* queue region info */ +struct i40e_queue_region_info { + /* the region id for this configuration */ + uint8_t region_id; + /* the start queue index for this region */ + uint8_t queue_start_index; + /* the total queue number of this queue region */ + uint8_t queue_num; + /* the total number of user priority for this region */ + uint8_t user_priority_num; + /* the packet's user priority for this region */ + uint8_t user_priority[I40E_MAX_USER_PRIORITY]; + /* the total number of flowtype for this region */ + uint8_t flowtype_num; + /** + * the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. + */ + uint8_t hw_flowtype[I40E_FILTER_PCTYPE_MAX]; +}; + +struct i40e_queue_regions { + /* the total number of queue region for this port */ + uint16_t queue_region_number; + struct i40e_queue_region_info region[I40E_REGION_MAX_INDEX + 1]; +}; + /* Tunnel filter number HW supports */ #define I40E_MAX_TUNNEL_FILTER_NUM 400 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9 -#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10 -#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11 -#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12 -#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP 8 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE 9 +#define I40E_AQC_ADD_CLOUD_FILTER_0X10 0x10 +#define I40E_AQC_ADD_CLOUD_FILTER_0X11 0x11 +#define I40E_AQC_ADD_CLOUD_FILTER_0X12 0x12 +#define I40E_AQC_ADD_L1_FILTER_0X11 0x11 +#define I40E_AQC_ADD_L1_FILTER_0X12 0x12 +#define I40E_AQC_ADD_L1_FILTER_0X13 0x13 +#define I40E_AQC_NEW_TR_21 21 +#define I40E_AQC_NEW_TR_22 22 enum i40e_tunnel_iptype { I40E_TUNNEL_IPTYPE_IPV4, @@ -599,6 +751,8 @@ enum i40e_tunnel_type { I40E_TUNNEL_TYPE_MPLSoUDP, I40E_TUNNEL_TYPE_MPLSoGRE, I40E_TUNNEL_TYPE_QINQ, + I40E_TUNNEL_TYPE_GTPC, + I40E_TUNNEL_TYPE_GTPU, I40E_TUNNEL_TYPE_MAX, }; @@ -722,6 +876,21 @@ struct i40e_tm_conf { bool committed; }; +enum i40e_new_pctype { + I40E_CUSTOMIZED_GTPC = 0, + I40E_CUSTOMIZED_GTPU_IPV4, + I40E_CUSTOMIZED_GTPU_IPV6, + I40E_CUSTOMIZED_GTPU, + I40E_CUSTOMIZED_MAX, +}; + +#define I40E_FILTER_PCTYPE_INVALID 0 +struct i40e_customized_pctype { + enum i40e_new_pctype index; /* Indicate which customized pctype */ + uint8_t pctype; /* New pctype value */ + bool valid; /* Check if it's valid */ +}; + /* * Structure to store private data specific for PF instance. 
*/ @@ -776,6 +945,7 @@ struct i40e_pf { struct i40e_fdir_info fdir; /* flow director info */ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */ + struct i40e_queue_regions queue_region; /* queue region info */ struct i40e_fc_conf fc_conf; /* Flow control conf */ struct i40e_mirror_rule_list mirror_list; uint16_t nb_mirror_rule; /* The number of mirror rules */ @@ -784,8 +954,14 @@ struct i40e_pf { bool floating_veb_list[I40E_MAX_VF]; struct i40e_flow_list flow_list; bool mpls_replace_flag; /* 1 - MPLS filter replace is done */ + bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */ bool qinq_replace_flag; /* QINQ filter replace is done */ struct i40e_tm_conf tm_conf; + + /* Dynamic Device Personalization */ + bool gtp_support; /* 1 - support GTP-C and GTP-U */ + /* customer customized pctype */ + struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX]; }; enum pending_msg { @@ -852,7 +1028,8 @@ struct i40e_vf { uint64_t flags; }; -#define I40E_MAX_PKT_TYPE 256 +#define I40E_MAX_PKT_TYPE 256 +#define I40E_FLOW_TYPE_MAX 64 /* * Structure to store private data for each PF/VF instance. @@ -881,13 +1058,17 @@ struct i40e_adapter { /* ptype mapping table */ uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned; + /* flow type to pctype mapping table */ + uint64_t pctypes_tbl[I40E_FLOW_TYPE_MAX] __rte_cache_min_aligned; + uint64_t flow_types_mask; + uint64_t pctypes_mask; }; extern const struct rte_flow_ops i40e_flow_ops; union i40e_filter_t { struct rte_eth_ethertype_filter ethertype_filter; - struct rte_eth_fdir_filter fdir_filter; + struct i40e_fdir_filter_conf fdir_filter; struct rte_eth_tunnel_filter_conf tunnel_filter; struct i40e_tunnel_filter_conf consistent_tunnel_filter; }; @@ -919,14 +1100,14 @@ void i40e_update_vsi_stats(struct i40e_vsi *vsi); void i40e_pf_disable_irq0(struct i40e_hw *hw); void i40e_pf_enable_irq0(struct i40e_hw *hw); int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); -void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi); +void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx); void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi); int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, struct i40e_vsi_vlan_pvid_info *info); int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on); int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on); -uint64_t i40e_config_hena(uint64_t flags, enum i40e_mac_type type); -uint64_t i40e_parse_hena(uint64_t flags); +uint64_t i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags); +uint64_t i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags); enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf); enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf); int i40e_fdir_setup(struct i40e_pf *pf); @@ -935,8 +1116,11 @@ const struct rte_memzone *i40e_memzone_reserve(const char *name, int socket_id); int i40e_fdir_configure(struct rte_eth_dev *dev); void i40e_fdir_teardown(struct i40e_pf *pf); -enum i40e_filter_pctype i40e_flowtype_to_pctype(uint16_t flow_type); -uint16_t i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype); +enum i40e_filter_pctype + i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, + uint16_t flow_type); +uint16_t i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, + enum i40e_filter_pctype pctype); int i40e_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); @@ 
-961,7 +1145,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule, int i40e_sw_ethertype_filter_del(struct i40e_pf *pf, struct i40e_ethertype_filter_input *input); int i40e_sw_fdir_filter_del(struct i40e_pf *pf, - struct rte_eth_fdir_input *input); + struct i40e_fdir_input *input); struct i40e_tunnel_filter * i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule, const struct i40e_tunnel_filter_input *input); @@ -974,6 +1158,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf, int i40e_add_del_fdir_filter(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *filter, bool add); +int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct i40e_fdir_filter_conf *filter, + bool add); int i40e_dev_tunnel_filter_set(struct i40e_pf *pf, struct rte_eth_tunnel_filter_conf *tunnel_filter, uint8_t add); @@ -1003,6 +1190,14 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val); int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops); void i40e_tm_conf_init(struct rte_eth_dev *dev); void i40e_tm_conf_uninit(struct rte_eth_dev *dev); +struct i40e_customized_pctype* +i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index); +void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size); +int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); +int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, + struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); +void i40e_init_queue_region_conf(struct rte_eth_dev *dev); #define I40E_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device) diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index f6d82934..02d9e579 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -47,10 +47,10 @@ #include <rte_log.h> #include <rte_debug.h> #include <rte_pci.h> +#include <rte_bus_pci.h> #include <rte_atomic.h> #include <rte_branch_prediction.h> #include <rte_memory.h> -#include <rte_memzone.h> #include <rte_eal.h> #include <rte_alarm.h> #include <rte_ether.h> @@ -67,8 +67,6 @@ #include "i40e_rxtx.h" #include "i40e_ethdev.h" #include "i40e_pf.h" -#define I40EVF_VSI_DEFAULT_MSIX_INTR 1 -#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0 /* busy wait delay in msec */ #define I40EVF_BUSY_WAIT_DELAY 10 @@ -108,7 +106,7 @@ static void i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); static int i40evf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); -static void i40evf_dev_stats_get(struct rte_eth_dev *dev, +static int i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n); @@ -118,10 +116,9 @@ static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev, static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev); static int i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); -static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask); -static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, - int on); +static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void i40evf_dev_close(struct rte_eth_dev *dev); +static int i40evf_dev_reset(struct rte_eth_dev *dev); static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev); static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev); static void i40evf_dev_allmulticast_enable(struct 
rte_eth_dev *dev); @@ -199,15 +196,16 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = { .allmulticast_disable = i40evf_dev_allmulticast_disable, .link_update = i40evf_dev_link_update, .stats_get = i40evf_dev_stats_get, + .stats_reset = i40evf_dev_xstats_reset, .xstats_get = i40evf_dev_xstats_get, .xstats_get_names = i40evf_dev_xstats_get_names, .xstats_reset = i40evf_dev_xstats_reset, .dev_close = i40evf_dev_close, + .dev_reset = i40evf_dev_reset, .dev_infos_get = i40evf_dev_info_get, .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, .vlan_filter_set = i40evf_vlan_filter_set, .vlan_offload_set = i40evf_vlan_offload_set, - .vlan_pvid_set = i40evf_vlan_pvid_set, .rx_queue_start = i40evf_dev_rx_queue_start, .rx_queue_stop = i40evf_dev_rx_queue_stop, .tx_queue_start = i40evf_dev_tx_queue_start, @@ -431,9 +429,7 @@ i40evf_check_api_version(struct rte_eth_dev *dev) pver = (struct virtchnl_version_info *)args.out_buffer; vf->version_major = pver->major; vf->version_minor = pver->minor; - if (vf->version_major == I40E_DPDK_VERSION_MAJOR) - PMD_DRV_LOG(INFO, "Peer is DPDK PF host"); - else if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) && + if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) && (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) PMD_DRV_LOG(INFO, "Peer is Linux PF host"); else { @@ -481,7 +477,7 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev) len = sizeof(struct virtchnl_vf_resource) + I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); - (void)rte_memcpy(vf->vf_res, args.out_buffer, + rte_memcpy(vf->vf_res, args.out_buffer, RTE_MIN(args.out_size, len)); i40e_vf_parse_hw_config(hw, vf->vf_res); @@ -563,37 +559,6 @@ i40evf_disable_vlan_strip(struct rte_eth_dev *dev) return ret; } -static int -i40evf_config_vlan_pvid(struct rte_eth_dev *dev, - struct i40e_vsi_vlan_pvid_info *info) -{ - struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - int err; - struct vf_cmd_info args; - struct virtchnl_pvid_info tpid_info; - - if (info == NULL) { - PMD_DRV_LOG(ERR, "invalid parameters"); - return I40E_ERR_PARAM; - } - - memset(&tpid_info, 0, sizeof(tpid_info)); - tpid_info.vsi_id = vf->vsi_res->vsi_id; - (void)rte_memcpy(&tpid_info.info, info, sizeof(*info)); - - args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID; - args.in_args = (uint8_t *)&tpid_info; - args.in_args_size = sizeof(tpid_info); - args.out_buffer = vf->aq_resp; - args.out_size = I40E_AQ_BUF_SZ; - - err = i40evf_execute_vf_cmd(dev, &args); - if (err) - PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID"); - - return err; -} - static void i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info, uint16_t vsi_id, @@ -629,7 +594,6 @@ i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info, } } -/* It configures VSI queues to co-work with Linux PF host */ static int i40evf_configure_vsi_queues(struct rte_eth_dev *dev) { @@ -673,72 +637,6 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev) return ret; } -/* It configures VSI queues to co-work with DPDK PF host */ -static int -i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev) -{ - struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - struct i40e_rx_queue **rxq = - (struct i40e_rx_queue **)dev->data->rx_queues; - struct i40e_tx_queue **txq = - (struct i40e_tx_queue **)dev->data->tx_queues; - struct virtchnl_vsi_queue_config_ext_info *vc_vqcei; - struct virtchnl_queue_pair_ext_info *vc_qpei; - struct vf_cmd_info args; - uint16_t i, nb_qp = vf->num_queue_pairs; - const uint32_t size 
= - I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp); - uint8_t buff[size]; - int ret; - - memset(buff, 0, sizeof(buff)); - vc_vqcei = (struct virtchnl_vsi_queue_config_ext_info *)buff; - vc_vqcei->vsi_id = vf->vsi_res->vsi_id; - vc_vqcei->num_queue_pairs = nb_qp; - vc_qpei = vc_vqcei->qpair; - for (i = 0; i < nb_qp; i++, vc_qpei++) { - i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq, - vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]); - i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq, - vc_vqcei->vsi_id, i, dev->data->nb_rx_queues, - vf->max_pkt_len, rxq[i]); - if (i < dev->data->nb_rx_queues) - /* - * It adds extra info for configuring VSI queues, which - * is needed to enable the configurable crc stripping - * in VF. - */ - vc_qpei->rxq_ext.crcstrip = - dev->data->dev_conf.rxmode.hw_strip_crc; - } - memset(&args, 0, sizeof(args)); - args.ops = - (enum virtchnl_ops)VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT; - args.in_args = (uint8_t *)vc_vqcei; - args.in_args_size = size; - args.out_buffer = vf->aq_resp; - args.out_size = I40E_AQ_BUF_SZ; - ret = i40evf_execute_vf_cmd(dev, &args); - if (ret) - PMD_DRV_LOG(ERR, "Failed to execute command of " - "VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT"); - - return ret; -} - -static int -i40evf_configure_queues(struct rte_eth_dev *dev) -{ - struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - - if (vf->version_major == I40E_DPDK_VERSION_MAJOR) - /* To support DPDK PF host */ - return i40evf_configure_vsi_queues_ext(dev); - else - /* To support Linux PF host */ - return i40evf_configure_vsi_queues(dev); -} - static int i40evf_config_irq_map(struct rte_eth_dev *dev) { @@ -752,14 +650,10 @@ i40evf_config_irq_map(struct rte_eth_dev *dev) uint32_t vector_id; int i, err; - if (rte_intr_allow_others(intr_handle)) { - if (vf->version_major == I40E_DPDK_VERSION_MAJOR) - vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR; - else - vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX; - } else { + if (rte_intr_allow_others(intr_handle)) + vector_id = I40E_RX_VEC_START; + else vector_id = I40E_MISC_VEC_ID; - } map_info = (struct virtchnl_irq_map_info *)cmd_buffer; map_info->num_vectors = 1; @@ -888,7 +782,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, int err; struct vf_cmd_info args; - if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) { + if (is_zero_ether_addr(addr)) { PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", addr->addr_bytes[0], addr->addr_bytes[1], addr->addr_bytes[2], addr->addr_bytes[3], @@ -899,7 +793,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, list = (struct virtchnl_ether_addr_list *)cmd_buffer; list->vsi_id = vf->vsi_res->vsi_id; list->num_elements = 1; - (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, + rte_memcpy(list->list[0].addr, addr->addr_bytes, sizeof(addr->addr_bytes)); args.ops = VIRTCHNL_OP_ADD_ETH_ADDR; @@ -939,7 +833,7 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev, list = (struct virtchnl_ether_addr_list *)cmd_buffer; list->vsi_id = vf->vsi_res->vsi_id; list->num_elements = 1; - (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, + rte_memcpy(list->list[0].addr, addr->addr_bytes, sizeof(addr->addr_bytes)); args.ops = VIRTCHNL_OP_DEL_ETH_ADDR; @@ -968,7 +862,7 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index) } static int -i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) +i40evf_query_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct virtchnl_queue_select q_stats; @@ 
-993,26 +887,58 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) return 0; } -static int -i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +static void +i40evf_stat_update_48(uint64_t *offset, + uint64_t *stat) { - int ret; - struct i40e_eth_stats *pstats = NULL; + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); - ret = i40evf_update_stats(dev, &pstats); - if (ret != 0) - return 0; + *stat &= I40E_48_BIT_MASK; +} - stats->ipackets = pstats->rx_unicast + pstats->rx_multicast + - pstats->rx_broadcast; - stats->opackets = pstats->tx_broadcast + pstats->tx_multicast + - pstats->tx_unicast; - stats->imissed = pstats->rx_discards; - stats->oerrors = pstats->tx_errors + pstats->tx_discards; - stats->ibytes = pstats->rx_bytes; - stats->obytes = pstats->tx_bytes; +static void +i40evf_stat_update_32(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} - return 0; +static void +i40evf_update_stats(struct i40e_vsi *vsi, + struct i40e_eth_stats *nes) +{ + struct i40e_eth_stats *oes = &vsi->eth_stats_offset; + + i40evf_stat_update_48(&oes->rx_bytes, + &nes->rx_bytes); + i40evf_stat_update_48(&oes->rx_unicast, + &nes->rx_unicast); + i40evf_stat_update_48(&oes->rx_multicast, + &nes->rx_multicast); + i40evf_stat_update_48(&oes->rx_broadcast, + &nes->rx_broadcast); + i40evf_stat_update_32(&oes->rx_discards, + &nes->rx_discards); + i40evf_stat_update_32(&oes->rx_unknown_protocol, + &nes->rx_unknown_protocol); + i40evf_stat_update_48(&oes->tx_bytes, + &nes->tx_bytes); + i40evf_stat_update_48(&oes->tx_unicast, + &nes->tx_unicast); + i40evf_stat_update_48(&oes->tx_multicast, + &nes->tx_multicast); + i40evf_stat_update_48(&oes->tx_broadcast, + &nes->tx_broadcast); + i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors); + i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } static void @@ -1022,10 +948,10 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev) struct i40e_eth_stats *pstats = NULL; /* read stat values to clear hardware registers */ - i40evf_update_stats(dev, &pstats); + i40evf_query_stats(dev, &pstats); /* set stats offset base on current values */ - vf->vsi.eth_stats_offset = vf->vsi.eth_stats; + vf->vsi.eth_stats_offset = *pstats; } static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, @@ -1049,17 +975,21 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, int ret; unsigned i; struct i40e_eth_stats *pstats = NULL; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_vsi *vsi = &vf->vsi; if (n < I40EVF_NB_XSTATS) return I40EVF_NB_XSTATS; - ret = i40evf_update_stats(dev, &pstats); + ret = i40evf_query_stats(dev, &pstats); if (ret != 0) return 0; if (!xstats) return 0; + i40evf_update_stats(vsi, pstats); + /* loop over xstats array and values from pstats */ for (i = 0; i < I40EVF_NB_XSTATS; i++) { xstats[i].id = i; @@ -1179,10 +1109,30 @@ i40evf_enable_irq0(struct i40e_hw *hw) } static int -i40evf_reset_vf(struct i40e_hw *hw) +i40evf_check_vf_reset_done(struct i40e_hw *hw) { int i, reset; + for (i = 0; i < MAX_RESET_WAIT_CNT; i++) { + reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT; + if (reset == VIRTCHNL_VFR_VFACTIVE || + reset == VIRTCHNL_VFR_COMPLETED) + 
break; + rte_delay_ms(50); + } + + if (i >= MAX_RESET_WAIT_CNT) + return -1; + + return 0; +} +static int +i40evf_reset_vf(struct i40e_hw *hw) +{ + int ret; + if (i40e_vf_reset(hw) != I40E_SUCCESS) { PMD_INIT_LOG(ERR, "Reset VF NIC failed"); return -1; @@ -1198,19 +1148,10 @@ i40evf_reset_vf(struct i40e_hw *hw) */ rte_delay_ms(200); - for (i = 0; i < MAX_RESET_WAIT_CNT; i++) { - reset = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT; - if (VIRTCHNL_VFR_COMPLETED == reset || VIRTCHNL_VFR_VFACTIVE == reset) - break; - else - rte_delay_ms(50); - } - - if (i >= MAX_RESET_WAIT_CNT) { - PMD_INIT_LOG(ERR, "Reset VF NIC failed"); - return -1; + ret = i40evf_check_vf_reset_done(hw); + if (ret) { + PMD_INIT_LOG(ERR, "VF is still resetting"); + return ret; } return 0; @@ -1233,6 +1174,10 @@ i40evf_init_vf(struct rte_eth_dev *dev) goto err; } + err = i40evf_check_vf_reset_done(hw); + if (err) + goto err; + i40e_init_adminq_parameter(hw); err = i40e_init_adminq(hw); if (err) { @@ -1249,29 +1194,30 @@ i40evf_init_vf(struct rte_eth_dev *dev) /* VF reset, shutdown admin queue and initialize again */ if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) { PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed"); - return -1; + goto err; } i40e_init_adminq_parameter(hw); if (i40e_init_adminq(hw) != I40E_SUCCESS) { PMD_INIT_LOG(ERR, "init_adminq failed"); - return -1; + goto err; } + vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0); if (!vf->aq_resp) { PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory"); - goto err_aq; + goto err_aq; } if (i40evf_check_api_version(dev) != 0) { PMD_INIT_LOG(ERR, "check_api version failed"); - goto err_aq; + goto err_api; } bufsz = sizeof(struct virtchnl_vf_resource) + (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); if (!vf->vf_res) { PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); - goto err_aq; + goto err_api; } if (i40evf_get_vf_resource(dev) != 0) { @@ -1293,7 +1239,15 @@ i40evf_init_vf(struct rte_eth_dev *dev) if (hw->mac.type == I40E_MAC_X722_VF) vf->flags = I40E_FLAG_RSS_AQ_CAPABLE; vf->vsi.vsi_id = vf->vsi_res->vsi_id; - vf->vsi.type = (enum i40e_vsi_type)vf->vsi_res->vsi_type; + + switch (vf->vsi_res->vsi_type) { + case VIRTCHNL_VSI_SRIOV: + vf->vsi.type = I40E_VSI_SRIOV; + break; + default: + vf->vsi.type = I40E_VSI_TYPE_UNKNOWN; + break; + } vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs; vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); @@ -1303,20 +1257,20 @@ i40evf_init_vf(struct rte_eth_dev *dev) else eth_random_addr(hw->mac.addr); /* Generate a random one */ - /* If the PF host is not DPDK, set the interval of ITR0 to max*/ - if (vf->version_major != I40E_DPDK_VERSION_MAJOR) { - I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, - (I40E_ITR_INDEX_DEFAULT << - I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) | - (interval << - I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)); - I40EVF_WRITE_FLUSH(hw); - } + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + (I40E_ITR_INDEX_DEFAULT << + I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) | + (interval << + I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)); + I40EVF_WRITE_FLUSH(hw); return 0; err_alloc: rte_free(vf->vf_res); + vf->vsi_res = NULL; +err_api: + rte_free(vf->aq_resp); err_aq: i40e_shutdown_adminq(hw); /* ignore error */ err: @@ -1476,7 +1430,6 @@ i40evf_dev_interrupt_handler(void *param) done: i40evf_enable_irq0(hw); - rte_intr_enable(dev->intr_handle); } static int @@ -1503,8 +1456,8 @@ i40evf_dev_init(struct rte_eth_dev 
*eth_dev) return 0; } i40e_set_default_ptype_table(eth_dev); + i40e_set_default_pctype_table(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; hw->vendor_id = pci_dev->id.vendor_id; hw->device_id = pci_dev->id.device_id; @@ -1586,7 +1539,7 @@ static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev) */ static struct rte_pci_driver rte_i40evf_pmd = { .id_table = pci_id_i40evf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, .probe = eth_i40evf_pci_probe, .remove = eth_i40evf_pci_remove, }; @@ -1630,19 +1583,11 @@ i40evf_dev_configure(struct rte_eth_dev *dev) static int i40evf_init_vlan(struct rte_eth_dev *dev) { - struct rte_eth_dev_data *data = dev->data; - int ret; - /* Apply vlan offload setting */ - i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); - - /* Apply pvid setting */ - ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid, - data->dev_conf.txmode.hw_vlan_insert_pvid); - return ret; + return i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); } -static void +static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct rte_eth_conf *dev_conf = &dev->data->dev_conf; @@ -1655,30 +1600,6 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) else i40evf_disable_vlan_strip(dev); } -} - -static int -i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) -{ - struct rte_eth_conf *dev_conf = &dev->data->dev_conf; - struct i40e_vsi_vlan_pvid_info info; - struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - - memset(&info, 0, sizeof(info)); - info.on = on; - - /* Linux pf host don't support vlan offload yet */ - if (vf->version_major == I40E_DPDK_VERSION_MAJOR) { - if (info.on) - info.config.pvid = pvid; - else { - info.config.reject.tagged = - dev_conf->txmode.hw_vlan_reject_tagged; - info.config.reject.untagged = - dev_conf->txmode.hw_vlan_reject_untagged; - } - return i40evf_config_vlan_pvid(dev, &info); - } return 0; } @@ -1899,7 +1820,6 @@ i40evf_tx_init(struct rte_eth_dev *dev) static inline void i40evf_enable_queues_intr(struct rte_eth_dev *dev) { - struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; @@ -1914,25 +1834,12 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev) return; } - if (vf->version_major == I40E_DPDK_VERSION_MAJOR) - /* To support DPDK PF host */ - I40E_WRITE_REG(hw, - I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1), - I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); - /* If host driver is kernel driver, do nothing. - * Interrupt 0 is used for rx packets, but don't set - * I40E_VFINT_DYN_CTL01, - * because it is already done in i40evf_enable_irq0. 
- */ - I40EVF_WRITE_FLUSH(hw); } static inline void i40evf_disable_queues_intr(struct rte_eth_dev *dev) { - struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; @@ -1944,17 +1851,6 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev) return; } - if (vf->version_major == I40E_DPDK_VERSION_MAJOR) - I40E_WRITE_REG(hw, - I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - - 1), - 0); - /* If host driver is kernel driver, do nothing. - * Interrupt 0 is used for rx packets, but don't zero - * I40E_VFINT_DYN_CTL01, - * because interrupt 0 is also used for adminq processing. - */ - I40EVF_WRITE_FLUSH(hw); } @@ -2050,7 +1946,7 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add) addr = &dev->data->mac_addrs[i]; if (is_zero_ether_addr(addr)) continue; - (void)rte_memcpy(list->list[j].addr, addr->addr_bytes, + rte_memcpy(list->list[j].addr, addr->addr_bytes, sizeof(addr->addr_bytes)); PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x", addr->addr_bytes[0], addr->addr_bytes[1], @@ -2124,7 +2020,7 @@ i40evf_dev_start(struct rte_eth_dev *dev) i40evf_tx_init(dev); - if (i40evf_configure_queues(dev) != 0) { + if (i40evf_configure_vsi_queues(dev) != 0) { PMD_DRV_LOG(ERR, "configure queues failed"); goto err_queue; } @@ -2141,7 +2037,20 @@ i40evf_dev_start(struct rte_eth_dev *dev) goto err_mac; } + /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt + * is mapped to VFIO vector 0 in i40evf_dev_init( ). + * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is + * not cleared, it will fail when rte_intr_enable( ) tries to map Rx + * queue interrupt to other VFIO vectors. + * So clear uio/vfio intr/evevnfd first to avoid failure. + */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + rte_intr_disable(intr_handle); + rte_intr_enable(intr_handle); + } + i40evf_enable_queues_intr(dev); + return 0; err_mac: @@ -2155,7 +2064,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -2216,6 +2125,8 @@ i40evf_dev_link_update(struct rte_eth_dev *dev, new_link.link_duplex = ETH_LINK_FULL_DUPLEX; new_link.link_status = vf->link_up ? 
ETH_LINK_UP : ETH_LINK_DOWN; + new_link.link_autoneg = + dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED; i40evf_dev_atomic_write_link_status(dev, &new_link); @@ -2295,7 +2206,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); dev_info->reta_size = ETH_RSS_RETA_SIZE_64; - dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask; dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | @@ -2346,11 +2257,30 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }; } -static void +static int i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { - if (i40evf_get_statistics(dev, stats)) + int ret; + struct i40e_eth_stats *pstats = NULL; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_vsi *vsi = &vf->vsi; + + ret = i40evf_query_stats(dev, &pstats); + if (ret == 0) { + i40evf_update_stats(vsi, pstats); + + stats->ipackets = pstats->rx_unicast + pstats->rx_multicast + + pstats->rx_broadcast; + stats->opackets = pstats->tx_broadcast + pstats->tx_multicast + + pstats->tx_unicast; + stats->imissed = pstats->rx_discards; + stats->oerrors = pstats->tx_errors + pstats->tx_discards; + stats->ibytes = pstats->rx_bytes; + stats->obytes = pstats->tx_bytes; + } else { PMD_DRV_LOG(ERR, "Get statistics failed"); + } + return ret; } static void @@ -2373,6 +2303,23 @@ i40evf_dev_close(struct rte_eth_dev *dev) i40evf_disable_irq0(hw); } +/* + * Reset VF device only to re-initialize resources in PMD layer + */ +static int +i40evf_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = i40evf_dev_uninit(dev); + if (ret) + return ret; + + ret = i40evf_dev_init(dev); + + return ret; +} + static int i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { @@ -2580,7 +2527,7 @@ static int i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf) { struct i40e_hw *hw = I40E_VF_TO_HW(vf); - uint64_t rss_hf, hena; + uint64_t hena; int ret; ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key, @@ -2588,14 +2535,7 @@ i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf) if (ret) return ret; - rss_hf = rss_conf->rss_hf; - hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); - hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; - if (hw->mac.type == I40E_MAC_X722) - hena &= ~I40E_RSS_HENA_ALL_X722; - else - hena &= ~I40E_RSS_HENA_ALL; - hena |= i40e_config_hena(rss_hf, hw->mac.type); + hena = i40e_config_hena(vf->adapter, rss_conf->rss_hf); i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); I40EVF_WRITE_FLUSH(hw); @@ -2607,16 +2547,9 @@ static void i40evf_disable_rss(struct i40e_vf *vf) { struct i40e_hw *hw = I40E_VF_TO_HW(vf); - uint64_t hena; - hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); - hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; - if (hw->mac.type == I40E_MAC_X722) - hena &= ~I40E_RSS_HENA_ALL_X722; - else - hena &= ~I40E_RSS_HENA_ALL; - i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); - i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), 0); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), 0); 
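/*
 * RSS configuration is now driven by the per-adapter mapping tables instead
 * of the fixed I40E_RSS_HENA_ALL masks: every flow type enabled in rss_hf
 * contributes the pctype bits recorded in adapter->pctypes_tbl[], and the
 * resulting 64-bit value is split across VFQF_HENA(0)/(1). A minimal sketch
 * of that idea follows; this is a hypothetical helper illustrating the data
 * structures added in this series, not necessarily how i40e_config_hena() is
 * implemented.
 */
static uint64_t
sketch_hena_from_flow_types(const struct i40e_adapter *adapter,
			    uint64_t rss_hf)
{
	uint64_t hena = 0;
	uint16_t flow_type;

	/* Walk all known flow types and OR in their pctype bit masks. */
	for (flow_type = RTE_ETH_FLOW_UNKNOWN + 1;
	     flow_type < I40E_FLOW_TYPE_MAX; flow_type++) {
		if (rss_hf & (1ULL << flow_type))
			hena |= adapter->pctypes_tbl[flow_type];
	}
	return hena;
}
/*
 * The low 32 bits of the returned value would be written to VFQF_HENA(0)
 * and the high 32 bits to VFQF_HENA(1), matching the register writes above.
 */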
I40EVF_WRITE_FLUSH(hw); } @@ -2645,7 +2578,7 @@ i40evf_config_rss(struct i40e_vf *vf) } rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf; - if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) { + if ((rss_conf.rss_hf & vf->adapter->flow_types_mask) == 0) { i40evf_disable_rss(vf); PMD_DRV_LOG(DEBUG, "No hash flag is set"); return 0; @@ -2670,14 +2603,13 @@ i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; + uint64_t rss_hf = rss_conf->rss_hf & vf->adapter->flow_types_mask; uint64_t hena; hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; - if (!(hena & ((hw->mac.type == I40E_MAC_X722) - ? I40E_RSS_HENA_ALL_X722 - : I40E_RSS_HENA_ALL))) { /* RSS disabled */ + + if (!(hena & vf->adapter->pctypes_mask)) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -EINVAL; return 0; @@ -2703,7 +2635,7 @@ i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; - rss_conf->rss_hf = i40e_parse_hena(hena); + rss_conf->rss_hf = i40e_parse_hena(vf->adapter, hena); return 0; } diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index 8013add4..3d7170d5 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -49,6 +49,7 @@ #include <rte_udp.h> #include <rte_tcp.h> #include <rte_sctp.h> +#include <rte_hash_crc.h> #include "i40e_logs.h" #include "base/i40e_type.h" @@ -71,6 +72,16 @@ #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF #define I40E_FDIR_IPv6_PAYLOAD_LEN 380 #define I40E_FDIR_UDP_DEFAULT_LEN 400 +#define I40E_FDIR_GTP_DEFAULT_LEN 384 +#define I40E_FDIR_INNER_IP_DEFAULT_LEN 384 +#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344 + +#define I40E_FDIR_GTPC_DST_PORT 2123 +#define I40E_FDIR_GTPU_DST_PORT 2152 +#define I40E_FDIR_GTP_VER_FLAG_0X30 0x30 +#define I40E_FDIR_GTP_VER_FLAG_0X32 0x32 +#define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01 +#define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF /* Wait time for fdir filter programming */ #define I40E_FDIR_MAX_WAIT_US 10000 @@ -100,13 +111,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf, enum i40e_filter_pctype pctype, const struct rte_eth_fdir_filter *filter, bool add); -static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input, +static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input, struct i40e_fdir_filter *filter); static struct i40e_fdir_filter * i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info, - const struct rte_eth_fdir_input *input); + const struct i40e_fdir_input *input); static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter); +static int +i40e_flow_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct i40e_fdir_filter_conf *filter, + bool add); static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) @@ -249,7 +265,7 @@ i40e_fdir_setup(struct i40e_pf *pf) goto fail_mem; } pf->fdir.prg_pkt = mz->addr; - pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); + pf->fdir.dma_addr = mz->iova; pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id); PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.", @@ -323,6 +339,7 @@ 
i40e_init_flx_pld(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint8_t pctype; int i, index; + uint16_t flow_type; /* * Define the bytes stream extracted as flexible payload in @@ -344,15 +361,10 @@ i40e_init_flx_pld(struct i40e_pf *pf) /* initialize the masks */ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { - if (hw->mac.type == I40E_MAC_X722) { - if (!I40E_VALID_PCTYPE_X722( - (enum i40e_filter_pctype)pctype)) - continue; - } else { - if (!I40E_VALID_PCTYPE( - (enum i40e_filter_pctype)pctype)) - continue; - } + flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) + continue; pf->fdir.flex_mask[pctype].word_mask = 0; i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0); for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) { @@ -449,7 +461,8 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg) * arguments are valid */ static int -i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf) +i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter, + const struct rte_eth_fdir_flex_conf *conf) { const struct rte_eth_flex_payload_cfg *flex_cfg; const struct rte_eth_fdir_flex_mask *flex_mask; @@ -457,6 +470,7 @@ i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf) uint8_t nb_bitmask; uint16_t i, j; int ret = 0; + enum i40e_filter_pctype pctype; if (conf == NULL) { PMD_DRV_LOG(INFO, "NULL pointer."); @@ -487,7 +501,8 @@ i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf) } for (i = 0; i < conf->nb_flexmasks; i++) { flex_mask = &conf->flex_mask[i]; - if (!I40E_VALID_FLOW(flex_mask->flow_type)) { + pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { PMD_DRV_LOG(WARNING, "invalid flow type."); return -EINVAL; } @@ -650,7 +665,7 @@ i40e_fdir_configure(struct rte_eth_dev *dev) i40e_init_flx_pld(pf); /* set flex config to default value */ conf = &dev->data->dev_conf.fdir_conf.flex_conf; - ret = i40e_check_fdir_flex_conf(conf); + ret = i40e_check_fdir_flex_conf(pf->adapter, conf); if (ret < 0) { PMD_DRV_LOG(ERR, " invalid configuration arguments."); return -EINVAL; @@ -664,11 +679,11 @@ i40e_fdir_configure(struct rte_eth_dev *dev) /* get translated pctype value in fd pctype register */ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl( hw, I40E_GLQF_FD_PCTYPES( - (int)i40e_flowtype_to_pctype( + (int)i40e_flowtype_to_pctype(pf->adapter, conf->flex_mask[i].flow_type))); } else - pctype = i40e_flowtype_to_pctype( - conf->flex_mask[i].flow_type); + pctype = i40e_flowtype_to_pctype(pf->adapter, + conf->flex_mask[i].flow_type); i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]); } @@ -926,6 +941,358 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf, dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t); ptr = payload + pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t); + rte_memcpy(ptr, + &fdir_input->flow_ext.flexbytes[dst], + size * sizeof(uint16_t)); + } + + return 0; +} + +static struct i40e_customized_pctype * +i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype) +{ + struct i40e_customized_pctype *cus_pctype; + enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC; + + for (; i < I40E_CUSTOMIZED_MAX; i++) { + cus_pctype = &pf->customized_pctype[i]; + if (pctype == cus_pctype->pctype) + return cus_pctype; + } + return NULL; +} + +static inline int +i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf, + const struct 
i40e_fdir_input *fdir_input, + unsigned char *raw_pkt, + bool vlan) +{ + struct i40e_customized_pctype *cus_pctype = NULL; + static uint8_t vlan_frame[] = {0x81, 0, 0, 0}; + uint16_t *ether_type; + uint8_t len = 2 * sizeof(struct ether_addr); + struct ipv4_hdr *ip; + struct ipv6_hdr *ip6; + uint8_t pctype = fdir_input->pctype; + bool is_customized_pctype = fdir_input->flow_ext.customized_pctype; + static const uint8_t next_proto[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE, + }; + + raw_pkt += 2 * sizeof(struct ether_addr); + if (vlan && fdir_input->flow_ext.vlan_tci) { + rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame)); + rte_memcpy(raw_pkt + sizeof(uint16_t), + &fdir_input->flow_ext.vlan_tci, + sizeof(uint16_t)); + raw_pkt += sizeof(vlan_frame); + len += sizeof(vlan_frame); + } + ether_type = (uint16_t *)raw_pkt; + raw_pkt += sizeof(uint16_t); + len += sizeof(uint16_t); + + if (is_customized_pctype) { + cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype); + if (!cus_pctype) { + PMD_DRV_LOG(ERR, "unknown pctype %u.", + fdir_input->pctype); + return -1; + } + } + + if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) + *ether_type = fdir_input->flow.l2_flow.ether_type; + else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 || + is_customized_pctype) { + ip = (struct ipv4_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + /* set len to by default */ + ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip->time_to_live = fdir_input->flow.ip4_flow.ttl ? + fdir_input->flow.ip4_flow.ttl : + I40E_FDIR_IP_DEFAULT_TTL; + ip->type_of_service = fdir_input->flow.ip4_flow.tos; + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + ip->src_addr = fdir_input->flow.ip4_flow.dst_ip; + ip->dst_addr = fdir_input->flow.ip4_flow.src_ip; + + if (!is_customized_pctype) + ip->next_proto_id = fdir_input->flow.ip4_flow.proto ? 
+ fdir_input->flow.ip4_flow.proto : + next_proto[fdir_input->pctype]; + else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC || + cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 || + cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 || + cus_pctype->index == I40E_CUSTOMIZED_GTPU) + ip->next_proto_id = IPPROTO_UDP; + len += sizeof(struct ipv4_hdr); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) { + ip6 = (struct ipv6_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + ip6->vtc_flow = + rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW | + (fdir_input->flow.ipv6_flow.tc << + I40E_FDIR_IPv6_TC_OFFSET)); + ip6->payload_len = + rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->proto = fdir_input->flow.ipv6_flow.proto ? + fdir_input->flow.ipv6_flow.proto : + next_proto[fdir_input->pctype]; + ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ? + fdir_input->flow.ipv6_flow.hop_limits : + I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + rte_memcpy(&ip6->src_addr, + &fdir_input->flow.ipv6_flow.dst_ip, + IPV6_ADDR_LEN); + rte_memcpy(&ip6->dst_addr, + &fdir_input->flow.ipv6_flow.src_ip, + IPV6_ADDR_LEN); + len += sizeof(struct ipv6_hdr); + } else { + PMD_DRV_LOG(ERR, "unknown pctype %u.", + fdir_input->pctype); + return -1; + } + + return len; +} + +/** + * i40e_flow_fdir_construct_pkt - construct packet based on fields in input + * @pf: board private structure + * @fdir_input: input set of the flow director entry + * @raw_pkt: a packet to be constructed + */ +static int +i40e_flow_fdir_construct_pkt(struct i40e_pf *pf, + const struct i40e_fdir_input *fdir_input, + unsigned char *raw_pkt) +{ + unsigned char *payload = NULL; + unsigned char *ptr; + struct udp_hdr *udp; + struct tcp_hdr *tcp; + struct sctp_hdr *sctp; + struct rte_flow_item_gtp *gtp; + struct ipv4_hdr *gtp_ipv4; + struct ipv6_hdr *gtp_ipv6; + uint8_t size, dst = 0; + uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/ + int len; + uint8_t pctype = fdir_input->pctype; + struct i40e_customized_pctype *cus_pctype; + + /* raw pcket template - just copy contents of the raw packet */ + if (fdir_input->flow_ext.pkt_template) { + memcpy(raw_pkt, fdir_input->flow.raw_flow.packet, + fdir_input->flow.raw_flow.length); + return 0; + } + + /* fill the ethernet and IP head */ + len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt, + !!fdir_input->flow_ext.vlan_tci); + if (len < 0) + return -EINVAL; + + /* fill the L4 head */ + if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) { + udp = (struct udp_hdr *)(raw_pkt + len); + payload = (unsigned char *)udp + sizeof(struct udp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. 
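For example, a filter whose udp4 input carries dst_port 319 is programmed with a template packet whose udp->src_port is 319, so the hardware parses the fields exactly as they will appear on received traffic.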
+ */ + udp->src_port = fdir_input->flow.udp4_flow.dst_port; + udp->dst_port = fdir_input->flow.udp4_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) { + tcp = (struct tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + tcp->src_port = fdir_input->flow.tcp4_flow.dst_port; + tcp->dst_port = fdir_input->flow.tcp4_flow.src_port; + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) { + sctp = (struct sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct sctp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + sctp->src_port = fdir_input->flow.sctp4_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp4_flow.src_port; + sctp->tag = fdir_input->flow.sctp4_flow.verify_tag; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) { + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) { + udp = (struct udp_hdr *)(raw_pkt + len); + payload = (unsigned char *)udp + sizeof(struct udp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + udp->src_port = fdir_input->flow.udp6_flow.dst_port; + udp->dst_port = fdir_input->flow.udp6_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) { + tcp = (struct tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + tcp->src_port = fdir_input->flow.udp6_flow.dst_port; + tcp->dst_port = fdir_input->flow.udp6_flow.src_port; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) { + sctp = (struct sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct sctp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. 
+ */ + sctp->src_port = fdir_input->flow.sctp6_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp6_flow.src_port; + sctp->tag = fdir_input->flow.sctp6_flow.verify_tag; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) { + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) { + payload = raw_pkt + len; + /** + * ARP packet is a special case on which the payload + * starts after the whole ARP header + */ + if (fdir_input->flow.l2_flow.ether_type == + rte_cpu_to_be_16(ETHER_TYPE_ARP)) + payload += sizeof(struct arp_hdr); + set_idx = I40E_FLXPLD_L2_IDX; + } else if (fdir_input->flow_ext.customized_pctype) { + /* If customized pctype is used */ + cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype); + if (cus_pctype->index == I40E_CUSTOMIZED_GTPC || + cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 || + cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 || + cus_pctype->index == I40E_CUSTOMIZED_GTPU) { + udp = (struct udp_hdr *)(raw_pkt + len); + udp->dgram_len = + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN); + + gtp = (struct rte_flow_item_gtp *) + ((unsigned char *)udp + sizeof(struct udp_hdr)); + gtp->msg_len = + rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN); + gtp->teid = fdir_input->flow.gtp_flow.teid; + gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01; + + /* GTP-C message type is not supported. */ + if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) { + udp->dst_port = + rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT); + gtp->v_pt_rsv_flags = + I40E_FDIR_GTP_VER_FLAG_0X32; + } else { + udp->dst_port = + rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT); + gtp->v_pt_rsv_flags = + I40E_FDIR_GTP_VER_FLAG_0X30; + } + + if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) { + gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF; + gtp_ipv4 = (struct ipv4_hdr *) + ((unsigned char *)gtp + + sizeof(struct rte_flow_item_gtp)); + gtp_ipv4->version_ihl = + I40E_FDIR_IP_DEFAULT_VERSION_IHL; + gtp_ipv4->next_proto_id = IPPROTO_IP; + gtp_ipv4->total_length = + rte_cpu_to_be_16( + I40E_FDIR_INNER_IP_DEFAULT_LEN); + payload = (unsigned char *)gtp_ipv4 + + sizeof(struct ipv4_hdr); + } else if (cus_pctype->index == + I40E_CUSTOMIZED_GTPU_IPV6) { + gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF; + gtp_ipv6 = (struct ipv6_hdr *) + ((unsigned char *)gtp + + sizeof(struct rte_flow_item_gtp)); + gtp_ipv6->vtc_flow = + rte_cpu_to_be_32( + I40E_FDIR_IPv6_DEFAULT_VTC_FLOW | + (0 << I40E_FDIR_IPv6_TC_OFFSET)); + gtp_ipv6->proto = IPPROTO_NONE; + gtp_ipv6->payload_len = + rte_cpu_to_be_16( + I40E_FDIR_INNER_IPV6_DEFAULT_LEN); + gtp_ipv6->hop_limits = + I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + payload = (unsigned char *)gtp_ipv6 + + sizeof(struct ipv6_hdr); + } else + payload = (unsigned char *)gtp + + sizeof(struct rte_flow_item_gtp); + } + } else { + PMD_DRV_LOG(ERR, "unknown pctype %u.", + fdir_input->pctype); + return -1; + } + + /* fill the flexbytes to payload */ + for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) { + pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i; + size = pf->fdir.flex_set[pit_idx].size; + if (size == 0) + continue; + dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t); + ptr = payload + + pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t); (void)rte_memcpy(ptr, &fdir_input->flow_ext.flexbytes[dst], size * sizeof(uint16_t)); @@ -1007,21 +1374,34 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq) } static int -i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input, 
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input, struct i40e_fdir_filter *filter) { - rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter)); + rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf)); + if (input->input.flow_ext.pkt_template) { + filter->fdir.input.flow.raw_flow.packet = NULL; + filter->fdir.input.flow.raw_flow.length = + rte_hash_crc(input->input.flow.raw_flow.packet, + input->input.flow.raw_flow.length, + input->input.flow.raw_flow.pctype); + } return 0; } /* Check if there exists the flow director filter */ static struct i40e_fdir_filter * i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info, - const struct rte_eth_fdir_input *input) + const struct i40e_fdir_input *input) { int ret; - ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input); + if (input->flow_ext.pkt_template) + ret = rte_hash_lookup_with_hash(fdir_info->hash_table, + (const void *)input, + input->flow.raw_flow.length); + else + ret = rte_hash_lookup(fdir_info->hash_table, + (const void *)input); if (ret < 0) return NULL; @@ -1035,8 +1415,13 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter) struct i40e_fdir_info *fdir_info = &pf->fdir; int ret; - ret = rte_hash_add_key(fdir_info->hash_table, - &filter->fdir.input); + if (filter->fdir.input.flow_ext.pkt_template) + ret = rte_hash_add_key_with_hash(fdir_info->hash_table, + &filter->fdir.input, + filter->fdir.input.flow.raw_flow.length); + else + ret = rte_hash_add_key(fdir_info->hash_table, + &filter->fdir.input); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to insert fdir filter to hash table %d!", @@ -1052,13 +1437,18 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter) /* Delete a flow director filter from the SW list */ int -i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input) +i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input) { struct i40e_fdir_info *fdir_info = &pf->fdir; struct i40e_fdir_filter *filter; int ret; - ret = rte_hash_del_key(fdir_info->hash_table, input); + if (input->flow_ext.pkt_template) + ret = rte_hash_del_key_with_hash(fdir_info->hash_table, + input, + input->flow.raw_flow.length); + else + ret = rte_hash_del_key(fdir_info->hash_table, input); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete fdir filter to hash table %d!", @@ -1082,16 +1472,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input) */ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev, - const struct rte_eth_fdir_filter *filter, - bool add) + const struct rte_eth_fdir_filter *filter, + bool add) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; enum i40e_filter_pctype pctype; - struct i40e_fdir_info *fdir_info = &pf->fdir; - struct i40e_fdir_filter *fdir_filter, *node; - struct i40e_fdir_filter check_filter; /* Check if the filter exists */ int ret = 0; if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { @@ -1100,7 +1487,8 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev, return -ENOTSUP; } - if (!I40E_VALID_FLOW(filter->input.flow_type)) { + pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } @@ -1114,6 +1502,76 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev, return 
-EINVAL; } + memset(pkt, 0, I40E_FDIR_PKT_LEN); + + ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt); + if (ret < 0) { + PMD_DRV_LOG(ERR, "construct packet for fdir fails."); + return ret; + } + + if (hw->mac.type == I40E_MAC_X722) { + /* get translated pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl( + hw, I40E_GLQF_FD_PCTYPES((int)pctype)); + } + + ret = i40e_fdir_filter_programming(pf, pctype, filter, add); + if (ret < 0) { + PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).", + pctype); + return ret; + } + + return ret; +} + +/** + * i40e_flow_add_del_fdir_filter - add or remove a flow director filter. + * @pf: board private structure + * @filter: fdir filter entry + * @add: 0 - delete, 1 - add + */ +int +i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct i40e_fdir_filter_conf *filter, + bool add) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + enum i40e_filter_pctype pctype; + struct i40e_fdir_info *fdir_info = &pf->fdir; + struct i40e_fdir_filter *fdir_filter, *node; + struct i40e_fdir_filter check_filter; /* Check if the filter exists */ + int ret = 0; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { + PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf."); + return -ENOTSUP; + } + + if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue ID"); + return -EINVAL; + } + if (filter->input.flow_ext.is_vf && + filter->input.flow_ext.dst_id >= pf->vf_num) { + PMD_DRV_LOG(ERR, "Invalid VF ID"); + return -EINVAL; + } + if (filter->input.flow_ext.pkt_template) { + if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN || + !filter->input.flow.raw_flow.packet) { + PMD_DRV_LOG(ERR, "Invalid raw packet template" + " flow filter parameters!"); + return -EINVAL; + } + pctype = filter->input.flow.raw_flow.pctype; + } else { + pctype = filter->input.pctype; + } + /* Check if there is the filter in SW list */ memset(&check_filter, 0, sizeof(check_filter)); i40e_fdir_filter_convert(filter, &check_filter); @@ -1132,7 +1590,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev, memset(pkt, 0, I40E_FDIR_PKT_LEN); - ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt); + ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt); if (ret < 0) { PMD_DRV_LOG(ERR, "construct packet for fdir fails."); return ret; @@ -1141,13 +1599,10 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev, if (hw->mac.type == I40E_MAC_X722) { /* get translated pctype value in fd pctype register */ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl( - hw, I40E_GLQF_FD_PCTYPES( - (int)i40e_flowtype_to_pctype( - filter->input.flow_type))); - } else - pctype = i40e_flowtype_to_pctype(filter->input.flow_type); + hw, I40E_GLQF_FD_PCTYPES((int)pctype)); + } - ret = i40e_fdir_filter_programming(pf, pctype, filter, add); + ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add); if (ret < 0) { PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).", pctype); @@ -1302,6 +1757,140 @@ i40e_fdir_filter_programming(struct i40e_pf *pf, } /* + * i40e_flow_fdir_filter_programming - Program a flow director filter rule. + * Is done by Flow Director Programming Descriptor followed by packet + * structure that contains the filter fields need to match. 
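+ * The rule is pushed to the dedicated FDIR TX queue as a pair of
+ * descriptors (the programming descriptor plus a dummy data descriptor that
+ * points at the constructed packet), and completion is then polled on the
+ * FDIR RX queue.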
+ * @pf: board private structure + * @pctype: pctype + * @filter: fdir filter entry + * @add: 0 - delete, 1 - add + */ +static int +i40e_flow_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct i40e_fdir_filter_conf *filter, + bool add) +{ + struct i40e_tx_queue *txq = pf->fdir.txq; + struct i40e_rx_queue *rxq = pf->fdir.rxq; + const struct i40e_fdir_action *fdir_action = &filter->action; + volatile struct i40e_tx_desc *txdp; + volatile struct i40e_filter_program_desc *fdirdp; + uint32_t td_cmd; + uint16_t vsi_id, i; + uint8_t dest; + + PMD_DRV_LOG(INFO, "filling filter programming descriptor."); + fdirdp = (volatile struct i40e_filter_program_desc *) + (&txq->tx_ring[txq->tx_tail]); + + fdirdp->qindex_flex_ptype_vsi = + rte_cpu_to_le_32((fdir_action->rx_queue << + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((fdir_action->flex_off << + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((pctype << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK); + + if (filter->input.flow_ext.is_vf) + vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id; + else + /* Use LAN VSI Id by default */ + vsi_id = pf->main_vsi->vsi_id; + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32(((uint32_t)vsi_id << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK); + + fdirdp->dtype_cmd_cntindex = + rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG); + + if (add) + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + else + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + + if (fdir_action->behavior == I40E_FDIR_REJECT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else if (fdir_action->behavior == I40E_FDIR_ACCEPT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + else if (fdir_action->behavior == I40E_FDIR_PASSTHRU) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER; + else { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior."); + return -EINVAL; + } + + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest << + I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((fdir_action->report_status << + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32( + ((uint32_t)pf->fdir.match_counter_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK); + + fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id); + + PMD_DRV_LOG(INFO, "filling transmit descriptor."); + txdp = &txq->tx_ring[txq->tx_tail + 1]; + txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); + td_cmd = I40E_TX_DESC_CMD_EOP | + I40E_TX_DESC_CMD_RS | + I40E_TX_DESC_CMD_DUMMY; + + txdp->cmd_type_offset_bsz = + i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0); + + txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */ + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + /* Update the tx tail register */ + rte_wmb(); + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) { + if ((txdp->cmd_type_offset_bsz & + 
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) == + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + break; + rte_delay_us(1); + } + if (i >= I40E_FDIR_MAX_WAIT_US) { + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: time out to get DD on tx queue."); + return -ETIMEDOUT; + } + /* totally delay 10 ms to check programming status*/ + rte_delay_us(I40E_FDIR_MAX_WAIT_US); + if (i40e_check_fdir_programming_status(rxq) < 0) { + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: programming status reported."); + return -ETIMEDOUT; + } + + return 0; +} + +/* * i40e_fdir_flush - clear all filters of Flow Director table * @pf: board private structure */ @@ -1384,7 +1973,6 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf, { struct i40e_fdir_flex_mask *mask; struct rte_eth_fdir_flex_mask *ptr = flex_mask; - struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint16_t flow_type; uint8_t i, j; uint16_t off_bytes, mask_tmp; @@ -1393,14 +1981,11 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf, i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) { mask = &pf->fdir.flex_mask[i]; - if (hw->mac.type == I40E_MAC_X722) { - if (!I40E_VALID_PCTYPE_X722((enum i40e_filter_pctype)i)) - continue; - } else { - if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i)) - continue; - } - flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i); + flow_type = i40e_pctype_to_flowtype(pf->adapter, + (enum i40e_filter_pctype)i); + if (flow_type == RTE_ETH_FLOW_UNKNOWN) + continue; + for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) { if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) { ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX; @@ -1580,7 +2165,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf) uint32_t best_cnt; /**< Number of filters in best effort spaces. */ TAILQ_FOREACH(f, fdir_list, rules) - i40e_add_del_fdir_filter(dev, &f->fdir, TRUE); + i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE); fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT); guarant_cnt = diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index b92719a3..7e4936e3 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -41,7 +41,6 @@ #include <rte_ether.h> #include <rte_ethdev.h> #include <rte_log.h> -#include <rte_memzone.h> #include <rte_malloc.h> #include <rte_eth_ctrl.h> #include <rte_tailq.h> @@ -84,11 +83,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *pattern, struct rte_flow_error *error, - struct rte_eth_fdir_filter *filter); + struct i40e_fdir_filter_conf *filter); static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions, struct rte_flow_error *error, - struct rte_eth_fdir_filter *filter); + struct i40e_fdir_filter_conf *filter); static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions, struct rte_flow_error *error, @@ -125,6 +124,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error, union i40e_filter_t *filter); +static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf, @@ -189,6 
+194,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = { RTE_FLOW_ITEM_TYPE_END, }; +static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPC, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + static enum rte_flow_item_type pattern_fdir_ipv6[] = { RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_IPV6, @@ -216,6 +255,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = { RTE_FLOW_ITEM_TYPE_END, }; +static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPC, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = { RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_RAW, @@ -1576,10 +1649,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter }, { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter }, /* FDIR - support default flow type with flexible payload */ { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter }, { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter }, @@ -1732,6 +1813,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_mpls_2, i40e_flow_parse_mpls_filter }, { pattern_mpls_3, i40e_flow_parse_mpls_filter }, { pattern_mpls_4, i40e_flow_parse_mpls_filter }, + /* GTP-C & GTP-U */ 
+ { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter }, + { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter }, + { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter }, + { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter }, /* QINQ */ { pattern_qinq_1, i40e_flow_parse_qinq_filter }, }; @@ -2302,20 +2388,58 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf, return 0; } +static uint8_t +i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, + enum rte_flow_item_type item_type, + struct i40e_fdir_filter_conf *filter) +{ + struct i40e_customized_pctype *cus_pctype = NULL; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_GTPC: + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPC); + break; + case RTE_FLOW_ITEM_TYPE_GTPU: + if (!filter->input.flow_ext.inner_ip) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU); + else if (filter->input.flow_ext.iip_type == + I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV4); + else if (filter->input.flow_ext.iip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV6); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported item type"); + break; + } + + if (cus_pctype) + return cus_pctype->pctype; + + return I40E_FILTER_PCTYPE_INVALID; +} + /* 1. Last in item should be NULL as range is not supported. * 2. Supported patterns: refer to array i40e_supported_patterns. - * 3. Supported flow type and input set: refer to array + * 3. Default supported flow type and input set: refer to array * valid_fdir_inset_table in i40e_ethdev.c. * 4. Mask of fields which need to be matched should be * filled with 1. * 5. Mask of fields which needn't to be matched should be * filled with 0. + * 6. GTP profile supports GTPv1 only. + * 7. GTP-C response message ('source_port' = 2123) is not supported. 
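+ * 8. For GTP items only the TEID is matched; the version/flags, message
+ *    type and message length fields in the GTP mask must be left zero.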
*/ static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *pattern, struct rte_flow_error *error, - struct rte_eth_fdir_filter *filter) + struct i40e_fdir_filter_conf *filter) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); const struct rte_flow_item *item = pattern; @@ -2326,15 +2450,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; const struct rte_flow_item_udp *udp_spec, *udp_mask; const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; const struct rte_flow_item_raw *raw_spec, *raw_mask; const struct rte_flow_item_vf *vf_spec; - uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN; - enum i40e_filter_pctype pctype; + uint8_t pctype = 0; uint64_t input_set = I40E_INSET_NONE; uint16_t frag_off; enum rte_flow_item_type item_type; enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END; uint32_t i, j; uint8_t ipv6_addr_mask[16] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, @@ -2352,12 +2477,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, uint16_t outer_tpid; uint16_t ether_type; uint32_t vtc_flow_cpu; + bool outer_ip = true; int ret; memset(off_arr, 0, sizeof(off_arr)); memset(len_arr, 0, sizeof(len_arr)); memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN); outer_tpid = i40e_get_outer_vlan(dev); + filter->input.flow_ext.customized_pctype = false; for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { if (item->last) { rte_flow_error_set(error, EINVAL, @@ -2402,7 +2529,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, } } - flow_type = RTE_ETH_FLOW_L2_PAYLOAD; + pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; layer_idx = I40E_FLXPLD_L2_IDX; break; @@ -2420,7 +2547,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, } } - flow_type = RTE_ETH_FLOW_L2_PAYLOAD; + pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; layer_idx = I40E_FLXPLD_L2_IDX; break; @@ -2430,8 +2557,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, (const struct rte_flow_item_ipv4 *)item->spec; ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + layer_idx = I40E_FLXPLD_L3_IDX; - if (ipv4_spec && ipv4_mask) { + if (ipv4_spec && ipv4_mask && outer_ip) { /* Check IPv4 mask and update input set */ if (ipv4_mask->hdr.version_ihl || ipv4_mask->hdr.total_length || @@ -2456,14 +2585,12 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) input_set |= I40E_INSET_IPV4_PROTO; - /* Get filter info */ - flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER; /* Check if it is fragment. 
*/ frag_off = ipv4_spec->hdr.fragment_offset; frag_off = rte_be_to_cpu_16(frag_off); if (frag_off & IPV4_HDR_OFFSET_MASK || frag_off & IPV4_HDR_MF_FLAG) - flow_type = RTE_ETH_FLOW_FRAG_IPV4; + pctype = I40E_FILTER_PCTYPE_FRAG_IPV4; /* Get the filter info */ filter->input.flow.ip4_flow.proto = @@ -2476,9 +2603,20 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, ipv4_spec->hdr.src_addr; filter->input.flow.ip4_flow.dst_ip = ipv4_spec->hdr.dst_addr; + } else if (!ipv4_spec && !ipv4_mask && !outer_ip) { + filter->input.flow_ext.inner_ip = true; + filter->input.flow_ext.iip_type = + I40E_FDIR_IPTYPE_IPV4; + } else if ((ipv4_spec || ipv4_mask) && !outer_ip) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner IPv4 mask."); + return -rte_errno; } - layer_idx = I40E_FLXPLD_L3_IDX; + if (outer_ip) + outer_ip = false; break; case RTE_FLOW_ITEM_TYPE_IPV6: @@ -2487,8 +2625,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, (const struct rte_flow_item_ipv6 *)item->spec; ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask; + pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; + layer_idx = I40E_FLXPLD_L3_IDX; - if (ipv6_spec && ipv6_mask) { + if (ipv6_spec && ipv6_mask && outer_ip) { /* Check IPv6 mask and update input set */ if (ipv6_mask->hdr.payload_len) { rte_flow_error_set(error, EINVAL, @@ -2535,20 +2675,32 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, /* Check if it is fragment. */ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER) - flow_type = - RTE_ETH_FLOW_FRAG_IPV6; - else - flow_type = - RTE_ETH_FLOW_NONFRAG_IPV6_OTHER; + pctype = I40E_FILTER_PCTYPE_FRAG_IPV6; + } else if (!ipv6_spec && !ipv6_mask && !outer_ip) { + filter->input.flow_ext.inner_ip = true; + filter->input.flow_ext.iip_type = + I40E_FDIR_IPTYPE_IPV6; + } else if ((ipv6_spec || ipv6_mask) && !outer_ip) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner IPv6 mask"); + return -rte_errno; } - layer_idx = I40E_FLXPLD_L3_IDX; - + if (outer_ip) + outer_ip = false; break; case RTE_FLOW_ITEM_TYPE_TCP: tcp_spec = (const struct rte_flow_item_tcp *)item->spec; tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV6_TCP; if (tcp_spec && tcp_mask) { /* Check TCP mask and update input set */ if (tcp_mask->hdr.sent_seq || @@ -2571,13 +2723,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, input_set |= I40E_INSET_DST_PORT; /* Get filter info */ - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) - flow_type = - RTE_ETH_FLOW_NONFRAG_IPV4_TCP; - else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) - flow_type = - RTE_ETH_FLOW_NONFRAG_IPV6_TCP; - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { filter->input.flow.tcp4_flow.src_port = tcp_spec->hdr.src_port; @@ -2598,6 +2743,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, udp_spec = (const struct rte_flow_item_udp *)item->spec; udp_mask = (const struct rte_flow_item_udp *)item->mask; + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + if (udp_spec && udp_mask) { /* Check UDP mask and update input set*/ if (udp_mask->hdr.dgram_len || @@ -2615,13 +2767,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, input_set |= I40E_INSET_DST_PORT; /* Get filter info */ - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) - flow_type = - 
RTE_ETH_FLOW_NONFRAG_IPV4_UDP; - else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) - flow_type = - RTE_ETH_FLOW_NONFRAG_IPV6_UDP; - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { filter->input.flow.udp4_flow.src_port = udp_spec->hdr.src_port; @@ -2638,12 +2783,50 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, layer_idx = I40E_FLXPLD_L4_IDX; break; + case RTE_FLOW_ITEM_TYPE_GTPC: + case RTE_FLOW_ITEM_TYPE_GTPU: + if (!pf->gtp_support) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported protocol"); + return -rte_errno; + } + + gtp_spec = (const struct rte_flow_item_gtp *)item->spec; + gtp_mask = (const struct rte_flow_item_gtp *)item->mask; + + if (gtp_spec && gtp_mask) { + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len || + gtp_mask->teid != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP mask"); + return -rte_errno; + } + + filter->input.flow.gtp_flow.teid = + gtp_spec->teid; + filter->input.flow_ext.customized_pctype = true; + cus_proto = item_type; + } + break; case RTE_FLOW_ITEM_TYPE_SCTP: sctp_spec = (const struct rte_flow_item_sctp *)item->spec; sctp_mask = (const struct rte_flow_item_sctp *)item->mask; + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; + if (sctp_spec && sctp_mask) { /* Check SCTP mask and update input set */ if (sctp_mask->hdr.cksum) { @@ -2662,13 +2845,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, input_set |= I40E_INSET_SCTP_VT; /* Get filter info */ - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) - flow_type = - RTE_ETH_FLOW_NONFRAG_IPV4_SCTP; - else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) - flow_type = - RTE_ETH_FLOW_NONFRAG_IPV6_SCTP; - if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { filter->input.flow.sctp4_flow.src_port = sctp_spec->hdr.src_port; @@ -2776,51 +2952,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, } } - pctype = i40e_flowtype_to_pctype(flow_type); - if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Unsupported flow type"); - return -rte_errno; + /* Get customized pctype value */ + if (filter->input.flow_ext.customized_pctype) { + pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported pctype"); + return -rte_errno; + } } - ret = i40e_flow_set_fdir_inset(pf, pctype, input_set); - if (ret == -1) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Conflict with the first rule's input set."); - return -rte_errno; - } else if (ret == -EINVAL) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Invalid pattern mask."); - return -rte_errno; - } + /* If customized pctype is not used, set fdir configuration.*/ + if (!filter->input.flow_ext.customized_pctype) { + ret = i40e_flow_set_fdir_inset(pf, pctype, input_set); + if (ret == -1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Conflict with the first rule's input set."); + return -rte_errno; + } else if (ret == -EINVAL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid pattern mask."); + return -rte_errno; + } - filter->input.flow_type = flow_type; + /* Store flex mask to SW */ + ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask); + if (ret == 
-1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Exceed maximal number of bitmasks"); + return -rte_errno; + } else if (ret == -2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Conflict with the first flexible rule"); + return -rte_errno; + } else if (ret > 0) + cfg_flex_msk = false; - /* Store flex mask to SW */ - ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask); - if (ret == -1) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Exceed maximal number of bitmasks"); - return -rte_errno; - } else if (ret == -2) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Conflict with the first flexible rule"); - return -rte_errno; - } else if (ret > 0) - cfg_flex_msk = false; + if (cfg_flex_pit) + i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id); - if (cfg_flex_pit) - i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id); + if (cfg_flex_msk) + i40e_flow_set_fdir_flex_msk(pf, pctype); + } - if (cfg_flex_msk) - i40e_flow_set_fdir_flex_msk(pf, pctype); + filter->input.pctype = pctype; return 0; } @@ -2832,7 +3015,7 @@ static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions, struct rte_flow_error *error, - struct rte_eth_fdir_filter *filter) + struct i40e_fdir_filter_conf *filter) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); const struct rte_flow_action *act; @@ -2855,13 +3038,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, "Invalid queue ID for FDIR."); return -rte_errno; } - filter->action.behavior = RTE_ETH_FDIR_ACCEPT; + filter->action.behavior = I40E_FDIR_ACCEPT; break; case RTE_FLOW_ACTION_TYPE_DROP: - filter->action.behavior = RTE_ETH_FDIR_REJECT; + filter->action.behavior = I40E_FDIR_REJECT; break; case RTE_FLOW_ACTION_TYPE_PASSTHRU: - filter->action.behavior = RTE_ETH_FDIR_PASSTHRU; + filter->action.behavior = I40E_FDIR_PASSTHRU; break; default: rte_flow_error_set(error, EINVAL, @@ -2876,11 +3059,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, switch (act->type) { case RTE_FLOW_ACTION_TYPE_MARK: mark_spec = (const struct rte_flow_action_mark *)act->conf; - filter->action.report_status = RTE_ETH_FDIR_REPORT_ID; + filter->action.report_status = I40E_FDIR_REPORT_ID; filter->soft_id = mark_spec->id; break; case RTE_FLOW_ACTION_TYPE_FLAG: - filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS; + filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS; break; case RTE_FLOW_ACTION_TYPE_END: return 0; @@ -2911,7 +3094,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, struct rte_flow_error *error, union i40e_filter_t *filter) { - struct rte_eth_fdir_filter *fdir_filter = + struct i40e_fdir_filter_conf *fdir_filter = &filter->fdir_filter; int ret; @@ -3646,6 +3829,148 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev, } /* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: GTP TEID. + * 3. Mask of fields which need to be matched should be + * filled with 1. + * 4. Mask of fields which needn't to be matched should be + * filled with 0. + * 5. GTP profile supports GTPv1 only. + * 6. GTP-C response message ('source_port' = 2123) is not supported. 
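+ * 7. The matched TEID is stored in the tunnel filter as the tenant_id and
+ *    the tunnel type is set to GTP-C or GTP-U accordingly.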
+ */ +static int +i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_gtp *gtp_spec; + const struct rte_flow_item_gtp *gtp_mask; + enum rte_flow_item_type item_type; + + if (!pf->gtp_support) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "GTP is not supported by default."); + return -rte_errno; + } + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ETH item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4; + /* IPv4 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_GTPC: + case RTE_FLOW_ITEM_TYPE_GTPU: + gtp_spec = + (const struct rte_flow_item_gtp *)item->spec; + gtp_mask = + (const struct rte_flow_item_gtp *)item->mask; + + if (!gtp_spec || !gtp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP item"); + return -rte_errno; + } + + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len || + gtp_mask->teid != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP mask"); + return -rte_errno; + } + + if (item_type == RTE_FLOW_ITEM_TYPE_GTPC) + filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC; + else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU) + filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU; + + filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid); + + break; + default: + break; + } + } + + return 0; +} + +static int +i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_tunnel_filter_conf *tunnel_filter = + &filter->consistent_tunnel_filter; + int ret; + + ret = i40e_flow_parse_gtp_pattern(dev, pattern, + error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_TUNNEL; + + return ret; +} + +/* 1. Last in item should be NULL as range is not supported. * 2. Supported filter types: QINQ. * 3. Mask of fields which need to be matched should be * filled with 1. 
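For reference, the GTP patterns registered above are consumed through the generic rte_flow API; the patch does not change how an application builds a rule. Below is a minimal usage sketch, not taken from this patch, of a rule that the new pattern_fdir_ipv4_gtpu path would accept. It assumes the GTP DDP profile has been loaded (so pf->gtp_support is set) and the DPDK 17.11-era rte_flow item/action layouts; the queue index, mark id and TEID value are illustrative only. Only the TEID is matched, as required by the mask checks in i40e_flow_parse_fdir_pattern(), and hits are steered to an RX queue with a MARK id.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: match GTP-U TEID 0x1234 behind ETH/IPV4/UDP and steer
 * matching packets to RX queue 4, tagging them with mark id 7. */
static struct rte_flow *
gtpu_teid_to_queue(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = {
		.teid = rte_cpu_to_be_32(0x1234),
	};
	struct rte_flow_item_gtp gtp_mask = {
		.teid = 0xffffffff, /* full TEID mask; all other fields zero */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 4 };
	struct rte_flow_action_mark mark = { .id = 7 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

With an inner IPv4 or IPv6 item appended after the GTPU item (spec and mask left NULL), the same shape of rule maps to the GTPU_IPV4/GTPU_IPV6 customized pctypes resolved in i40e_flow_fdir_get_pctype_value().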
@@ -3877,7 +4202,7 @@ i40e_flow_create(struct rte_eth_dev *dev, i40e_ethertype_filter_list); break; case RTE_ETH_FILTER_FDIR: - ret = i40e_add_del_fdir_filter(dev, + ret = i40e_flow_add_del_fdir_filter(dev, &cons_filter.fdir_filter, 1); if (ret) goto free_flow; @@ -3927,7 +4252,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev, (struct i40e_tunnel_filter *)flow->rule); break; case RTE_ETH_FILTER_FDIR: - ret = i40e_add_del_fdir_filter(dev, + ret = i40e_flow_add_del_fdir_filter(dev, &((struct i40e_fdir_filter *)flow->rule)->fdir, 0); break; default: @@ -4016,12 +4341,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf, vsi = vf->vsi; } - if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) == - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) || - ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) == - I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) || - ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) == - I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ)) + if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) == + I40E_AQC_ADD_CLOUD_FILTER_0X11) || + ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) == + I40E_AQC_ADD_CLOUD_FILTER_0X12) || + ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) == + I40E_AQC_ADD_CLOUD_FILTER_0X10)) big_buffer = 1; if (big_buffer) diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c index 100f8dc2..94bb0cfd 100644 --- a/drivers/net/i40e/i40e_pf.c +++ b/drivers/net/i40e/i40e_pf.c @@ -44,7 +44,6 @@ #include <rte_pci.h> #include <rte_ether.h> #include <rte_ethdev.h> -#include <rte_memzone.h> #include <rte_malloc.h> #include <rte_memcpy.h> @@ -538,73 +537,6 @@ send_msg: return ret; } -static int -i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf, - uint8_t *msg, - uint16_t msglen, - bool b_op) -{ - struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); - struct i40e_vsi *vsi = vf->vsi; - struct virtchnl_vsi_queue_config_ext_info *vc_vqcei = - (struct virtchnl_vsi_queue_config_ext_info *)msg; - struct virtchnl_queue_pair_ext_info *vc_qpei; - int i, ret = I40E_SUCCESS; - - if (!b_op) { - i40e_pf_host_send_msg_to_vf( - vf, - VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, - I40E_NOT_SUPPORTED, NULL, 0); - return ret; - } - - if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps || - vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP || - msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, - vc_vqcei->num_queue_pairs)) { - PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong"); - ret = I40E_ERR_PARAM; - goto send_msg; - } - - vc_qpei = vc_vqcei->qpair; - for (i = 0; i < vc_vqcei->num_queue_pairs; i++) { - if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 || - vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) { - ret = I40E_ERR_PARAM; - goto send_msg; - } - /* - * Apply VF RX queue setting to HMC. - * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, - * then the extra information of - * 'struct virtchnl_queue_pair_ext_info' is needed, - * otherwise set the last parameter to NULL. 
- */ - if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq, - vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Configure RX queue HMC failed"); - ret = I40E_ERR_PARAM; - goto send_msg; - } - - /* Apply VF TX queue setting to HMC */ - if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) != - I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Configure TX queue HMC failed"); - ret = I40E_ERR_PARAM; - goto send_msg; - } - } - -send_msg: - i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, - ret, NULL, 0); - - return ret; -} - static void i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf, struct virtchnl_vector_map *vvm) @@ -714,7 +646,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf, (struct virtchnl_irq_map_info *)msg; struct virtchnl_vector_map *map; int i; - uint16_t vector_id; + uint16_t vector_id, itr_idx; unsigned long qbit_max; if (!b_op) { @@ -741,12 +673,13 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf, vf->vsi->msix_intr = irqmap->vecmap[0].vector_id; vf->vsi->nb_msix = irqmap->num_vectors; vf->vsi->nb_used_qps = vf->vsi->nb_qps; + itr_idx = irqmap->vecmap[0].rxitr_idx; /* Don't care how the TX/RX queue mapping with this vector. * Link all VF RX queues together. Only did mapping work. * VF can disable/enable the intr by itself. */ - i40e_vsi_queues_bind_intr(vf->vsi); + i40e_vsi_queues_bind_intr(vf->vsi, itr_idx); goto send_msg; } @@ -909,7 +842,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf, for (i = 0; i < addr_list->num_elements; i++) { mac = (struct ether_addr *)(addr_list->list[i].addr); - (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; if (is_zero_ether_addr(mac) || i40e_vsi_add_mac(vf->vsi, &filter)) { @@ -1157,42 +1090,13 @@ i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op) return ret; } -static int -i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf, - uint8_t *msg, - uint16_t msglen, - bool b_op) -{ - int ret = I40E_SUCCESS; - struct virtchnl_pvid_info *tpid_info = - (struct virtchnl_pvid_info *)msg; - - if (!b_op) { - i40e_pf_host_send_msg_to_vf( - vf, - I40E_VIRTCHNL_OP_CFG_VLAN_PVID, - I40E_NOT_SUPPORTED, NULL, 0); - return ret; - } - - if (msg == NULL || msglen != sizeof(*tpid_info)) { - ret = I40E_ERR_PARAM; - goto send_msg; - } - - ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info); - -send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID, - ret, NULL, 0); - - return ret; -} - void i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) { + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); struct virtchnl_pf_event event; + uint16_t vf_id = vf->vf_idx; + uint32_t tval, rval; event.event = VIRTCHNL_EVENT_LINK_CHANGE; event.event_data.link_event.link_status = @@ -1224,8 +1128,15 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) break; } - i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT, - I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); + tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id)); + rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id)); + + if (tval & I40E_VF_ATQLEN_ATQLEN_MASK || + tval & I40E_VF_ATQLEN_ATQENABLE_MASK || + rval & I40E_VF_ARQLEN_ARQLEN_MASK || + rval & I40E_VF_ARQLEN_ARQENABLE_MASK) + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT, + I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); } void @@ -1300,11 +1211,6 @@ i40e_pf_host_handle_vf_msg(struct 
rte_eth_dev *dev, i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen, b_op); break; - case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT: - PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received"); - i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg, - msglen, b_op); - break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received"); i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op); @@ -1359,10 +1265,6 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received"); i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op); break; - case I40E_VIRTCHNL_OP_CFG_VLAN_PVID: - PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received"); - i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op); - break; /* Don't add command supported below, which will * return an error code. */ diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h index 7afb7eae..04116637 100644 --- a/drivers/net/i40e/i40e_pf.h +++ b/drivers/net/i40e/i40e_pf.h @@ -34,58 +34,9 @@ #ifndef _I40E_PF_H_ #define _I40E_PF_H_ -/* VERSION info to exchange between VF and PF host. In case VF works with - * ND kernel driver, it reads VIRTCHNL_VERSION_MAJOR/MINOR. In - * case works with DPDK host, it reads version below. Then VF realize who it - * is talking to and use proper language to communicate. - * */ -#define I40E_DPDK_SIGNATURE ('D' << 24 | 'P' << 16 | 'D' << 8 | 'K') -#define I40E_DPDK_VERSION_MAJOR I40E_DPDK_SIGNATURE -#define I40E_DPDK_VERSION_MINOR 0 - /* Default setting on number of VSIs that VF can contain */ #define I40E_DEFAULT_VF_VSI_NUM 1 -#define I40E_DPDK_OFFSET 0x100 - -/* DPDK pf driver specific command to VF */ -enum virtchnl_ops_dpdk { - /* - * Keep some gap between Linux PF commands and - * DPDK PF extended commands. - */ - I40E_VIRTCHNL_OP_CFG_VLAN_PVID = VIRTCHNL_OP_VERSION + - I40E_DPDK_OFFSET, - VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, -}; - -/* A structure to support extended info of a receive queue. */ -struct virtchnl_rxq_ext_info { - uint8_t crcstrip; -}; - -/* - * A structure to support extended info of queue pairs, an additional field - * is added, comparing to original 'struct virtchnl_queue_pair_info'. - */ -struct virtchnl_queue_pair_ext_info { - /* vsi_id and queue_id should be identical for both rx and tx queues.*/ - struct virtchnl_txq_info txq; - struct virtchnl_rxq_info rxq; - struct virtchnl_rxq_ext_info rxq_ext; -}; - -/* - * A structure to support extended info of VSI queue pairs, - * 'struct virtchnl_queue_pair_ext_info' is used, see its original - * of 'struct virtchnl_queue_pair_info'. - */ -struct virtchnl_vsi_queue_config_ext_info { - uint16_t vsi_id; - uint16_t num_queue_pairs; - struct virtchnl_queue_pair_ext_info qpair[0]; -}; - struct virtchnl_vlan_offload_info { uint16_t vsi_id; uint8_t enable_vlan_strip; @@ -99,17 +50,6 @@ struct virtchnl_vlan_offload_info { #define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \ (sizeof(*(x)) + sizeof((x)->qpair[0]) * (n)) -/* - * I40E_VIRTCHNL_OP_CFG_VLAN_PVID - * VF sends this message to enable/disable pvid. If it's - * enable op, needs to specify the pvid. PF returns status - * code in retval. 
- */ -struct virtchnl_pvid_info { - uint16_t vsi_id; - struct i40e_vsi_vlan_pvid_info info; -}; - int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset); void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, uint16_t abs_vf_id, uint32_t opcode, diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index d42c23c0..8b4f612f 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -108,7 +108,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) { if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { - mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1); PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", @@ -589,7 +589,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) mb->nb_segs = 1; mb->port = rxq->port_id; dma_addr = rte_cpu_to_le_64(\ - rte_mbuf_data_dma_addr_default(mb)); + rte_mbuf_data_iova_default(mb)); rxdp[i].read.hdr_addr = 0; rxdp[i].read.pkt_addr = dma_addr; } @@ -752,7 +752,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxm = rxe->mbuf; rxe->mbuf = nmb; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->read.hdr_addr = 0; rxdp->read.pkt_addr = dma_addr; @@ -869,7 +869,7 @@ i40e_recv_scattered_pkts(void *rx_queue, rxm = rxe->mbuf; rxe->mbuf = nmb; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); /* Set data buffer address and data length of the mbuf */ rxdp->read.hdr_addr = 0; @@ -1202,7 +1202,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Setup TX Descriptor */ slen = m_seg->data_len; - buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + buf_dma_addr = rte_mbuf_data_iova(m_seg); PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n" "buf_dma_addr: %#"PRIx64";\n" @@ -1301,7 +1301,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts) uint32_t i; for (i = 0; i < 4; i++, txdp++, pkts++) { - dma_addr = rte_mbuf_data_dma_addr(*pkts); + dma_addr = rte_mbuf_data_iova(*pkts); txdp->buffer_addr = rte_cpu_to_le_64(dma_addr); txdp->cmd_type_offset_bsz = i40e_build_ctob((uint32_t)I40E_TD_CMD, 0, @@ -1315,7 +1315,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts) { uint64_t dma_addr; - dma_addr = rte_mbuf_data_dma_addr(*pkts); + dma_addr = rte_mbuf_data_iova(*pkts); txdp->buffer_addr = rte_cpu_to_le_64(dma_addr); txdp->cmd_type_offset_bsz = i40e_build_ctob((uint32_t)I40E_TD_CMD, 0, @@ -1734,36 +1734,42 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { - struct i40e_vsi *vsi; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_vsi *vsi; + struct i40e_pf *pf = NULL; + struct i40e_vf *vf = NULL; struct i40e_rx_queue *rxq; const struct rte_memzone *rz; uint32_t ring_size; uint16_t len, i; - uint16_t base, bsf, tc_mapping; - int use_def_burst_func = 1; + uint16_t reg_idx, base, bsf, tc_mapping; + int q_offset, use_def_burst_func = 1; if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { - struct i40e_vf *vf = - I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vf 
= I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); vsi = &vf->vsi; - } else + if (!vsi) + return -EINVAL; + reg_idx = queue_idx; + } else { + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); - - if (vsi == NULL) { - PMD_DRV_LOG(ERR, "VSI not available or queue " - "index exceeds the maximum"); - return I40E_ERR_PARAM; + if (!vsi) + return -EINVAL; + q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx); + if (q_offset < 0) + return -EINVAL; + reg_idx = vsi->base_queue + q_offset; } + if (nb_desc % I40E_ALIGN_RING_DESC != 0 || - (nb_desc > I40E_MAX_RING_DESC) || - (nb_desc < I40E_MIN_RING_DESC)) { + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is " "invalid", nb_desc); - return I40E_ERR_PARAM; + return -EINVAL; } /* Free memory if needed */ @@ -1786,12 +1792,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->nb_rx_desc = nb_desc; rxq->rx_free_thresh = rx_conf->rx_free_thresh; rxq->queue_id = queue_idx; - if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) - rxq->reg_idx = queue_idx; - else /* PF device */ - rxq->reg_idx = vsi->base_queue + - i40e_get_queue_offset_by_qindex(pf, queue_idx); - + rxq->reg_idx = reg_idx; rxq->port_id = dev->data->port_id; rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN); @@ -1822,7 +1823,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, /* Zero all the descriptors in the ring. */ memset(rz->addr, 0, ring_size); - rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring_phys_addr = rz->iova; rxq->rx_ring = (union i40e_rx_desc *)rz->addr; len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST); @@ -2012,34 +2013,40 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - struct i40e_vsi *vsi; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi; + struct i40e_pf *pf = NULL; + struct i40e_vf *vf = NULL; struct i40e_tx_queue *txq; const struct rte_memzone *tz; uint32_t ring_size; uint16_t tx_rs_thresh, tx_free_thresh; - uint16_t i, base, bsf, tc_mapping; + uint16_t reg_idx, i, base, bsf, tc_mapping; + int q_offset; if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { - struct i40e_vf *vf = - I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); vsi = &vf->vsi; - } else + if (!vsi) + return -EINVAL; + reg_idx = queue_idx; + } else { + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); - - if (vsi == NULL) { - PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) " - "exceeds the maximum", queue_idx); - return I40E_ERR_PARAM; + if (!vsi) + return -EINVAL; + q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx); + if (q_offset < 0) + return -EINVAL; + reg_idx = vsi->base_queue + q_offset; } if (nb_desc % I40E_ALIGN_RING_DESC != 0 || - (nb_desc > I40E_MAX_RING_DESC) || - (nb_desc < I40E_MIN_RING_DESC)) { + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is " "invalid", nb_desc); - return I40E_ERR_PARAM; + return -EINVAL; } /** @@ -2148,18 +2155,13 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->hthresh = tx_conf->tx_thresh.hthresh; txq->wthresh = 
tx_conf->tx_thresh.wthresh; txq->queue_id = queue_idx; - if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) - txq->reg_idx = queue_idx; - else /* PF device */ - txq->reg_idx = vsi->base_queue + - i40e_get_queue_offset_by_qindex(pf, queue_idx); - + txq->reg_idx = reg_idx; txq->port_id = dev->data->port_id; txq->txq_flags = tx_conf->txq_flags; txq->vsi = vsi; txq->tx_deferred_start = tx_conf->tx_deferred_start; - txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring_phys_addr = tz->iova; txq->tx_ring = (struct i40e_tx_desc *)tz->addr; /* Allocate software ring */ @@ -2221,12 +2223,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id) if (mz) return mz; - if (rte_xen_dom0_supported()) - mz = rte_memzone_reserve_bounded(name, len, - socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M); - else - mz = rte_memzone_reserve_aligned(name, len, - socket_id, 0, I40E_RING_BASE_ALIGN); + mz = rte_memzone_reserve_aligned(name, len, + socket_id, 0, I40E_RING_BASE_ALIGN); return mz; } @@ -2307,18 +2305,40 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq) void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) { + struct rte_eth_dev *dev; uint16_t i; + dev = &rte_eth_devices[txq->port_id]; + if (!txq || !txq->sw_ring) { PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); return; } - for (i = 0; i < txq->nb_tx_desc; i++) { - if (txq->sw_ring[i].mbuf) { + /** + * vPMD tx will not set sw_ring's mbuf to NULL after free, + * so need to free remains more carefully. + */ + if (dev->tx_pkt_burst == i40e_xmit_pkts_vec) { + i = txq->tx_next_dd - txq->tx_rs_thresh + 1; + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); txq->sw_ring[i].mbuf = NULL; } + } else { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } } } @@ -2431,7 +2451,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq) mbuf->port = rxq->port_id; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); rxd = &rxq->rx_ring[i]; rxd->read.pkt_addr = dma_addr; @@ -2675,7 +2695,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf) txq->reg_idx = pf->fdir.fdir_vsi->base_queue; txq->vsi = pf->fdir.fdir_vsi; - txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring_phys_addr = tz->iova; txq->tx_ring = (struct i40e_tx_desc *)tz->addr; /* * don't need to allocate software ring and reset for the fdir @@ -2731,7 +2751,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf) rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; rxq->vsi = pf->fdir.fdir_vsi; - rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring_phys_addr = rz->iova; rxq->rx_ring = (union i40e_rx_desc *)rz->addr; /* @@ -2941,6 +2961,64 @@ i40e_set_default_ptype_table(struct rte_eth_dev *dev) ad->ptype_tbl[i] = i40e_get_default_pkt_type(i); } +void __attribute__((cold)) +i40e_set_default_pctype_table(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) + ad->pctypes_tbl[i] = 0ULL; + ad->flow_types_mask = 0ULL; + ad->pctypes_mask = 0ULL; + + 
ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] = + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] = + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] = + (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD); + + if (hw->mac.type == I40E_MAC_X722) { + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + } + + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) { + if (ad->pctypes_tbl[i]) + ad->flow_types_mask |= (1ULL << i); + ad->pctypes_mask |= ad->pctypes_tbl[i]; + } +} + /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */ int __attribute__((weak)) i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index 20084d64..06c6a659 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -121,7 +121,7 @@ struct i40e_rx_queue { uint16_t rxrearm_start; /**< the idx we start the re-arming from */ uint64_t mbuf_initializer; /**< value to init mbufs */ - uint8_t port_id; /**< device port ID */ + uint16_t port_id; /**< device port ID */ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */ uint16_t queue_id; /**< RX queue index */ uint16_t reg_idx; /**< RX queue register index */ @@ -167,7 +167,7 @@ struct i40e_tx_queue { uint8_t pthresh; /**< Prefetch threshold register. */ uint8_t hthresh; /**< Host threshold register. */ uint8_t wthresh; /**< Write-back threshold reg. */ - uint8_t port_id; /**< Device port identifier. */ + uint16_t port_id; /**< Device port identifier. */ uint16_t queue_id; /**< TX queue index. 
*/ uint16_t reg_idx; uint32_t txq_flags; @@ -255,6 +255,7 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq); void i40e_set_tx_function(struct rte_eth_dev *dev); void i40e_set_default_ptype_table(struct rte_eth_dev *dev); +void i40e_set_default_pctype_table(struct rte_eth_dev *dev); /* For each value it means, datasheet of hardware can tell more details * diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c index f4036ea2..5e4e472a 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c +++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c @@ -100,7 +100,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) p1 = (uintptr_t)&mb1->rearm_data; *(uint64_t *)p1 = rxq->mbuf_initializer; - /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr); vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr); @@ -146,7 +146,7 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts) /* map rss and vlan type to rss hash and vlan flag */ const vector unsigned char vlan_flags = (vector unsigned char){ 0, 0, 0, 0, - PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; @@ -538,7 +538,7 @@ vtx1(volatile struct i40e_tx_desc *txdp, ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); vector unsigned long descriptor = (vector unsigned long){ - pkt->buf_physaddr + pkt->data_off, high_qw}; + pkt->buf_iova + pkt->data_off, high_qw}; *(vector unsigned long *)txdp = descriptor; } diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c index 694e91f3..b5685e2b 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -81,13 +81,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) mb0 = rxep[0].mbuf; mb1 = rxep[1].mbuf; - paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM; + paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM; dma_addr0 = vdupq_n_u64(paddr); /* flush desc with pa dma_addr */ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0); - paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM; + paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM; dma_addr1 = vdupq_n_u64(paddr); vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1); } @@ -137,7 +137,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4], /* map rss and vlan type to rss hash and vlan flag */ const uint8x16_t vlan_flags = { 0, 0, 0, 0, - PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; @@ -197,8 +197,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4], } #define PKTLEN_SHIFT 10 - -#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL +#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t)) static inline void desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts, @@ -230,7 +229,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, struct i40e_rx_entry *sw_ring; uint16_t nb_pkts_recd; int pos; - uint64_t var; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; /* mask to shuffle from desc. 
to mbuf */ @@ -364,7 +362,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, /* C.2 get 4 pkts staterr value */ staterr = vzipq_u16(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0]; - stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0); desc_to_olflags_v(rxq, descs, &rx_pkts[pos]); @@ -429,6 +426,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, rx_pkts[pos + 3]->next = NULL; } + staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1); + staterr = vreinterpretq_u16_s16( + vshrq_n_s16(vreinterpretq_s16_u16(staterr), + I40E_UINT16_BIT - 1)); + stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0); + rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP); /* D.3 copy final 1,2 data to rx_pkts */ @@ -438,10 +441,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, pkt_mb1); desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); /* C.4 calc avaialbe number of desc */ - var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK); - nb_pkts_recd += var; - if (likely(var != RTE_I40E_DESCS_PER_LOOP)) + if (unlikely(stat == 0)) { + nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP; + } else { + nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT; break; + } } /* Update our internal tail pointer */ @@ -515,7 +520,7 @@ vtx1(volatile struct i40e_tx_desc *txdp, ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); - uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw}; + uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, high_qw}; vst1q_u64((uint64_t *)txdp, descriptor); } diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c index 779f14e5..9d2d1f83 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_sse.c +++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c @@ -86,8 +86,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) mb0 = rxep[0].mbuf; mb1 = rxep[1].mbuf; - /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ - RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) != offsetof(struct rte_mbuf, buf_addr) + 8); vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); @@ -151,7 +151,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4], /* map rss and vlan type to rss hash and vlan flag */ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, + 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0, 0); const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, @@ -549,7 +549,7 @@ vtx1(volatile struct i40e_tx_desc *txdp, ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); __m128i descriptor = _mm_set_epi64x(high_qw, - pkt->buf_physaddr + pkt->data_off); + pkt->buf_iova + pkt->data_off); _mm_store_si128((__m128i *)txdp, descriptor); } diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c index d90313af..44316f64 100644 --- a/drivers/net/i40e/i40e_tm.c +++ b/drivers/net/i40e/i40e_tm.c @@ -302,7 +302,7 @@ i40e_shaper_profile_add(struct rte_eth_dev *dev, if (!shaper_profile) return -ENOMEM; shaper_profile->shaper_profile_id = shaper_profile_id; - (void)rte_memcpy(&shaper_profile->profile, profile, + rte_memcpy(&shaper_profile->profile, profile, sizeof(struct rte_tm_shaper_params)); TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list, shaper_profile, node); @@ -374,11 +374,13 @@ 
i40e_tm_node_search(struct rte_eth_dev *dev, } static int -i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id, +i40e_node_param_check(struct rte_eth_dev *dev, uint32_t node_id, uint32_t priority, uint32_t weight, struct rte_tm_node_params *params, struct rte_tm_error *error) { + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (node_id == RTE_TM_NODE_ID_NULL) { error->type = RTE_TM_ERROR_TYPE_NODE_ID; error->message = "invalid node id"; @@ -409,8 +411,8 @@ i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id, return -EINVAL; } - /* for root node */ - if (parent_node_id == RTE_TM_NODE_ID_NULL) { + /* for non-leaf node */ + if (node_id >= hw->func_caps.num_tx_qp) { if (params->nonleaf.wfq_weight_mode) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; @@ -433,7 +435,7 @@ i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id, return 0; } - /* for TC or queue node */ + /* for leaf node */ if (params->leaf.cman) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; error->message = "Congestion management not supported"; @@ -478,7 +480,7 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id, struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX; enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX; - struct i40e_tm_shaper_profile *shaper_profile; + struct i40e_tm_shaper_profile *shaper_profile = NULL; struct i40e_tm_node *tm_node; struct i40e_tm_node *parent_node; uint16_t tc_nb = 0; @@ -494,7 +496,7 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id, return -EINVAL; } - ret = i40e_node_param_check(node_id, parent_node_id, priority, weight, + ret = i40e_node_param_check(dev, node_id, priority, weight, params, error); if (ret) return ret; @@ -507,12 +509,15 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id, } /* check the shaper profile id */ - shaper_profile = i40e_shaper_profile_search(dev, - params->shaper_profile_id); - if (!shaper_profile) { - error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; - error->message = "shaper profile not exist"; - return -EINVAL; + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + shaper_profile = i40e_shaper_profile_search( + dev, params->shaper_profile_id); + if (!shaper_profile) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; + error->message = "shaper profile not exist"; + return -EINVAL; + } } /* root node if not have a parent */ @@ -544,12 +549,13 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id, tm_node->reference_count = 0; tm_node->parent = NULL; tm_node->shaper_profile = shaper_profile; - (void)rte_memcpy(&tm_node->params, params, + rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params)); pf->tm_conf.root = tm_node; /* increase the reference counter of the shaper profile */ - shaper_profile->reference_count++; + if (shaper_profile) + shaper_profile->reference_count++; return 0; } @@ -615,9 +621,9 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id, tm_node->priority = priority; tm_node->weight = weight; tm_node->reference_count = 0; - tm_node->parent = pf->tm_conf.root; + tm_node->parent = parent_node; tm_node->shaper_profile = shaper_profile; - (void)rte_memcpy(&tm_node->params, params, + rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params)); if (parent_node_type == I40E_TM_NODE_TYPE_PORT) { TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, @@ -631,7 +637,8 @@ i40e_node_add(struct rte_eth_dev 
*dev, uint32_t node_id, tm_node->parent->reference_count++; /* increase the reference counter of the shaper profile */ - shaper_profile->reference_count++; + if (shaper_profile) + shaper_profile->reference_count++; return 0; } @@ -678,14 +685,16 @@ i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id, /* root node */ if (node_type == I40E_TM_NODE_TYPE_PORT) { - tm_node->shaper_profile->reference_count--; + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; rte_free(tm_node); pf->tm_conf.root = NULL; return 0; } /* TC or queue node */ - tm_node->shaper_profile->reference_count--; + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; tm_node->parent->reference_count--; if (node_type == I40E_TM_NODE_TYPE_TC) { TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node); @@ -753,15 +762,34 @@ i40e_level_capabilities_get(struct rte_eth_dev *dev, cap->n_nodes_max = 1; cap->n_nodes_nonleaf_max = 1; cap->n_nodes_leaf_max = 0; - cap->non_leaf_nodes_identical = true; - cap->leaf_nodes_identical = true; + } else if (level_id == I40E_TM_NODE_TYPE_TC) { + /* TC */ + cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS; + cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS; + cap->n_nodes_leaf_max = 0; + } else { + /* queue */ + cap->n_nodes_max = hw->func_caps.num_tx_qp; + cap->n_nodes_nonleaf_max = 0; + cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp; + } + + cap->non_leaf_nodes_identical = true; + cap->leaf_nodes_identical = true; + + if (level_id != I40E_TM_NODE_TYPE_QUEUE) { cap->nonleaf.shaper_private_supported = true; cap->nonleaf.shaper_private_dual_rate_supported = false; cap->nonleaf.shaper_private_rate_min = 0; /* 40Gbps -> 5GBps */ cap->nonleaf.shaper_private_rate_max = 5000000000ull; cap->nonleaf.shaper_shared_n_max = 0; - cap->nonleaf.sched_n_children_max = I40E_MAX_TRAFFIC_CLASS; + if (level_id == I40E_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + I40E_MAX_TRAFFIC_CLASS; + else + cap->nonleaf.sched_n_children_max = + hw->func_caps.num_tx_qp; cap->nonleaf.sched_sp_n_priorities_max = 1; cap->nonleaf.sched_wfq_n_children_per_group_max = 0; cap->nonleaf.sched_wfq_n_groups_max = 0; @@ -771,21 +799,7 @@ i40e_level_capabilities_get(struct rte_eth_dev *dev, return 0; } - /* TC or queue node */ - if (level_id == I40E_TM_NODE_TYPE_TC) { - /* TC */ - cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS; - cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS; - cap->n_nodes_leaf_max = 0; - cap->non_leaf_nodes_identical = true; - } else { - /* queue */ - cap->n_nodes_max = hw->func_caps.num_tx_qp; - cap->n_nodes_nonleaf_max = 0; - cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp; - cap->non_leaf_nodes_identical = true; - } - cap->leaf_nodes_identical = true; + /* queue node */ cap->leaf.shaper_private_supported = true; cap->leaf.shaper_private_dual_rate_supported = false; cap->leaf.shaper_private_rate_min = 0; @@ -888,11 +902,15 @@ i40e_hierarchy_commit(struct rte_eth_dev *dev, * If the port has a max bandwidth, the TCs should have none. 
*/ /* port */ - bw = pf->tm_conf.root->shaper_profile->profile.peak.rate; + if (pf->tm_conf.root->shaper_profile) + bw = pf->tm_conf.root->shaper_profile->profile.peak.rate; + else + bw = 0; if (bw) { /* check if any TC has a max bandwidth */ TAILQ_FOREACH(tm_node, tc_list, node) { - if (tm_node->shaper_profile->profile.peak.rate) { + if (tm_node->shaper_profile && + tm_node->shaper_profile->profile.peak.rate) { error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; error->message = "no port and TC max bandwidth" " in parallel"; @@ -936,7 +954,10 @@ i40e_hierarchy_commit(struct rte_eth_dev *dev, } tc_map &= ~BIT_ULL(i); - bw = tm_node->shaper_profile->profile.peak.rate; + if (tm_node->shaper_profile) + bw = tm_node->shaper_profile->profile.peak.rate; + else + bw = 0; if (!bw) continue; @@ -947,7 +968,10 @@ i40e_hierarchy_commit(struct rte_eth_dev *dev, } TAILQ_FOREACH(tm_node, queue_list, node) { - bw = tm_node->shaper_profile->profile.peak.rate; + if (tm_node->shaper_profile) + bw = tm_node->shaper_profile->profile.peak.rate; + else + bw = 0; if (bw) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; error->message = "not support queue QoS"; diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c index f12b7f4a..aeb92af3 100644 --- a/drivers/net/i40e/rte_pmd_i40e.c +++ b/drivers/net/i40e/rte_pmd_i40e.c @@ -35,13 +35,14 @@ #include <rte_tailq.h> #include "base/i40e_prototype.h" +#include "base/i40e_dcb.h" #include "i40e_ethdev.h" #include "i40e_pf.h" #include "i40e_rxtx.h" #include "rte_pmd_i40e.h" int -rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf) +rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -66,7 +67,7 @@ rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf) } int -rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -117,7 +118,7 @@ rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -170,7 +171,7 @@ i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add) } int -rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -219,7 +220,7 @@ rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK; memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -264,7 +265,7 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } @@ -325,7 +326,7 @@ i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi) for (i = 0; i < vlan_num; i++) { mv_f[i].filter_type = f->mac_info.filter_type; - (void)rte_memcpy(&mv_f[i].macaddr, + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); } @@ -407,7 +408,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t 
on) vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB; memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); @@ -430,7 +431,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on) } int -rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on) +rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -473,7 +474,7 @@ rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on) } int -rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -514,7 +515,7 @@ rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) } int -rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -555,7 +556,7 @@ rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) } int -rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, +rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, struct ether_addr *mac_addr) { struct i40e_mac_filter *f; @@ -591,14 +592,16 @@ rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, /* Remove all existing mac */ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) - i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr); + if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr) + != I40E_SUCCESS) + PMD_DRV_LOG(WARNING, "Delete MAC failed"); return 0; } /* Set vlan strip on/off for specific VF from host */ int -rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -633,7 +636,7 @@ rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on) return ret; } -int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, +int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id, uint16_t vlan_id) { struct rte_eth_dev *dev; @@ -685,7 +688,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID; memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -698,7 +701,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, return ret; } -int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, +int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; @@ -747,7 +750,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, } if (on) { - (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); } else { @@ -764,7 +767,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, return ret; } -int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on) +int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -819,7 +822,7 
@@ int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on) } memset(&ctxt, 0, sizeof(ctxt)); - (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); ctxt.seid = vsi->seid; hw = I40E_VSI_TO_HW(vsi); @@ -858,7 +861,7 @@ i40e_vlan_filter_count(struct i40e_vsi *vsi) return count; } -int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, +int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) { struct rte_eth_dev *dev; @@ -941,7 +944,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, } int -rte_pmd_i40e_get_vf_stats(uint8_t port, +rte_pmd_i40e_get_vf_stats(uint16_t port, uint16_t vf_id, struct rte_eth_stats *stats) { @@ -986,7 +989,7 @@ rte_pmd_i40e_get_vf_stats(uint8_t port, } int -rte_pmd_i40e_reset_vf_stats(uint8_t port, +rte_pmd_i40e_reset_vf_stats(uint16_t port, uint16_t vf_id) { struct rte_eth_dev *dev; @@ -1020,7 +1023,7 @@ rte_pmd_i40e_reset_vf_stats(uint8_t port, } int -rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw) +rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -1109,7 +1112,7 @@ rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw) } int -rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id, +rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id, uint8_t tc_num, uint8_t *bw_weight) { struct rte_eth_dev *dev; @@ -1223,7 +1226,7 @@ rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id, } int -rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id, +rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id, uint8_t tc_no, uint32_t bw) { struct rte_eth_dev *dev; @@ -1341,7 +1344,7 @@ rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id, } int -rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map) +rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -1513,7 +1516,7 @@ i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec) /* Check if the profile info exists */ static int -i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec) +i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) { struct rte_eth_dev *dev = &rte_eth_devices[port]; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1557,7 +1560,7 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec) } int -rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, +rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, uint32_t size, enum rte_pmd_i40e_package_op op) { @@ -1606,6 +1609,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, return -EINVAL; } + i40e_update_customized_info(dev, buff, size); + /* Find metadata segment */ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr); @@ -1704,6 +1709,27 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, return status; } +/* Get number of tvl records in the section */ +static unsigned int +i40e_get_tlv_section_size(struct i40e_profile_section_header *sec) +{ + unsigned int i, nb_rec, nb_tlv = 0; + struct i40e_profile_tlv_section_record *tlv; + + if (!sec) + return nb_tlv; + + /* get number of records in the section */ + nb_rec = sec->section.size / + sizeof(struct i40e_profile_tlv_section_record); + for (i = 0; i < nb_rec; ) { + tlv = (struct i40e_profile_tlv_section_record 
*)&sec[1 + i]; + i += tlv->len; + nb_tlv++; + } + return nb_tlv; +} + int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size, uint8_t *info_buff, uint32_t info_size, enum rte_pmd_i40e_package_info type) @@ -1858,12 +1884,162 @@ int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size, return I40E_SUCCESS; } + /* get number of protocols */ + if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) { + struct i40e_profile_section_header *proto; + + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO, + (struct i40e_profile_segment *)i40e_seg_hdr); + *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto); + return I40E_SUCCESS; + } + + /* get list of protocols */ + if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) { + uint32_t i, j, nb_tlv, nb_rec, nb_proto_info; + struct rte_pmd_i40e_proto_info *pinfo; + struct i40e_profile_section_header *proto; + struct i40e_profile_tlv_section_record *tlv; + + pinfo = (struct rte_pmd_i40e_proto_info *)info_buff; + nb_proto_info = info_size / + sizeof(struct rte_pmd_i40e_proto_info); + for (i = 0; i < nb_proto_info; i++) { + pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED; + memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE); + } + proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO, + (struct i40e_profile_segment *)i40e_seg_hdr); + nb_tlv = i40e_get_tlv_section_size(proto); + if (nb_tlv == 0) + return I40E_SUCCESS; + if (nb_proto_info < nb_tlv) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + /* get number of records in the section */ + nb_rec = proto->section.size / + sizeof(struct i40e_profile_tlv_section_record); + tlv = (struct i40e_profile_tlv_section_record *)&proto[1]; + for (i = j = 0; i < nb_rec; j++) { + pinfo[j].proto_id = tlv->data[0]; + snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s", + (const char *)&tlv->data[1]); + i += tlv->len; + tlv = &tlv[tlv->len]; + } + return I40E_SUCCESS; + } + + /* get number of packet classification types */ + if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) { + struct i40e_profile_section_header *pctype; + + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype); + return I40E_SUCCESS; + } + + /* get list of packet classification types */ + if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) { + uint32_t i, j, nb_tlv, nb_rec, nb_proto_info; + struct rte_pmd_i40e_ptype_info *pinfo; + struct i40e_profile_section_header *pctype; + struct i40e_profile_tlv_section_record *tlv; + + pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff; + nb_proto_info = info_size / + sizeof(struct rte_pmd_i40e_ptype_info); + for (i = 0; i < nb_proto_info; i++) + memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED, + sizeof(struct rte_pmd_i40e_ptype_info)); + pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + nb_tlv = i40e_get_tlv_section_size(pctype); + if (nb_tlv == 0) + return I40E_SUCCESS; + if (nb_proto_info < nb_tlv) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + + /* get number of records in the section */ + nb_rec = pctype->section.size / + sizeof(struct i40e_profile_tlv_section_record); + tlv = (struct i40e_profile_tlv_section_record *)&pctype[1]; + for (i = j = 
0; i < nb_rec; j++) { + memcpy(&pinfo[j], tlv->data, + sizeof(struct rte_pmd_i40e_ptype_info)); + i += tlv->len; + tlv = &tlv[tlv->len]; + } + return I40E_SUCCESS; + } + + /* get number of packet types */ + if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) { + struct i40e_profile_section_header *ptype; + + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype); + return I40E_SUCCESS; + } + + /* get list of packet types */ + if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) { + uint32_t i, j, nb_tlv, nb_rec, nb_proto_info; + struct rte_pmd_i40e_ptype_info *pinfo; + struct i40e_profile_section_header *ptype; + struct i40e_profile_tlv_section_record *tlv; + + pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff; + nb_proto_info = info_size / + sizeof(struct rte_pmd_i40e_ptype_info); + for (i = 0; i < nb_proto_info; i++) + memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED, + sizeof(struct rte_pmd_i40e_ptype_info)); + ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + nb_tlv = i40e_get_tlv_section_size(ptype); + if (nb_tlv == 0) + return I40E_SUCCESS; + if (nb_proto_info < nb_tlv) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + /* get number of records in the section */ + nb_rec = ptype->section.size / + sizeof(struct i40e_profile_tlv_section_record); + for (i = j = 0; i < nb_rec; j++) { + tlv = (struct i40e_profile_tlv_section_record *) + &ptype[1 + i]; + memcpy(&pinfo[j], tlv->data, + sizeof(struct rte_pmd_i40e_ptype_info)); + i += tlv->len; + } + return I40E_SUCCESS; + } + PMD_DRV_LOG(ERR, "Info type %u is invalid.", type); return -EINVAL; } int -rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size) +rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size) { struct rte_eth_dev *dev; struct i40e_hw *hw; @@ -1933,7 +2109,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type) tnl != RTE_PTYPE_TUNNEL_VXLAN && tnl != RTE_PTYPE_TUNNEL_NVGRE && tnl != RTE_PTYPE_TUNNEL_GENEVE && - tnl != RTE_PTYPE_TUNNEL_GRENAT) + tnl != RTE_PTYPE_TUNNEL_GRENAT && + tnl != RTE_PTYPE_TUNNEL_GTPC && + tnl != RTE_PTYPE_TUNNEL_GTPU) return -1; if (il2 && @@ -1991,7 +2169,7 @@ static int check_invalid_ptype_mapping( int rte_pmd_i40e_ptype_mapping_update( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t count, uint8_t exclusive) @@ -2027,7 +2205,7 @@ rte_pmd_i40e_ptype_mapping_update( return 0; } -int rte_pmd_i40e_ptype_mapping_reset(uint8_t port) +int rte_pmd_i40e_ptype_mapping_reset(uint16_t port) { struct rte_eth_dev *dev; @@ -2044,7 +2222,7 @@ int rte_pmd_i40e_ptype_mapping_reset(uint8_t port) } int rte_pmd_i40e_ptype_mapping_get( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t size, uint16_t *count, @@ -2078,7 +2256,7 @@ int rte_pmd_i40e_ptype_mapping_get( return 0; } -int rte_pmd_i40e_ptype_mapping_replace(uint8_t port, +int rte_pmd_i40e_ptype_mapping_replace(uint16_t port, uint32_t target, uint8_t mask, uint32_t pkt_type) @@ -2115,3 +2293,695 @@ int rte_pmd_i40e_ptype_mapping_replace(uint8_t port, return 0; } + +int +rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct ether_addr *mac_addr) +{ + struct rte_eth_dev *dev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; 
+ struct i40e_mac_filter_info mac_filter; + int ret; + + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) + return -EINVAL; + + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + ether_addr_copy(mac_addr, &mac_filter.mac_addr); + ret = i40e_vsi_add_mac(vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MAC filter."); + return -1; + } + + return 0; +} + +int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + i40e_set_default_pctype_table(dev); + + return 0; +} + +int rte_pmd_i40e_flow_type_mapping_get( + uint16_t port, + struct rte_pmd_i40e_flow_type_mapping *mapping_items) +{ + struct rte_eth_dev *dev; + struct i40e_adapter *ad; + uint16_t i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) { + mapping_items[i].flow_type = i; + mapping_items[i].pctype = ad->pctypes_tbl[i]; + } + + return 0; +} + +int +rte_pmd_i40e_flow_type_mapping_update( + uint16_t port, + struct rte_pmd_i40e_flow_type_mapping *mapping_items, + uint16_t count, + uint8_t exclusive) +{ + struct rte_eth_dev *dev; + struct i40e_adapter *ad; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (count > I40E_FLOW_TYPE_MAX) + return -EINVAL; + + for (i = 0; i < count; i++) + if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX || + mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN || + (mapping_items[i].pctype & + (1ULL << I40E_FILTER_PCTYPE_INVALID))) + return -EINVAL; + + ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + if (exclusive) { + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) + ad->pctypes_tbl[i] = 0ULL; + ad->flow_types_mask = 0ULL; + } + + for (i = 0; i < count; i++) { + ad->pctypes_tbl[mapping_items[i].flow_type] = + mapping_items[i].pctype; + if (mapping_items[i].pctype) + ad->flow_types_mask |= + (1ULL << mapping_items[i].flow_type); + else + ad->flow_types_mask &= + ~(1ULL << mapping_items[i].flow_type); + } + + for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++) + ad->pctypes_mask |= ad->pctypes_tbl[i]; + + return 0; +} + +int +rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac) +{ + struct rte_eth_dev *dev; + struct ether_addr *mac; + struct i40e_pf *pf; + int vf_id; + struct i40e_pf_vf *vf; + uint16_t vf_num; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + vf_num = pf->vf_num; + + for (vf_id = 0; vf_id < vf_num; vf_id++) { + vf = &pf->vfs[vf_id]; + mac = &vf->mac_addr; + + if (is_same_ether_addr(mac, vf_mac)) + return vf_id; + } + + return -EINVAL; +} + +static int +i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + 
uint16_t i; + struct i40e_vsi *vsi = pf->main_vsi; + uint16_t queue_offset, bsf, tc_index; + struct i40e_vsi_context ctxt; + struct i40e_aqc_vsi_properties_data *vsi_info; + struct i40e_queue_regions *region_info = + &pf->queue_region; + int32_t ret = -EINVAL; + + if (!region_info->queue_region_number) { + PMD_INIT_LOG(ERR, "there is no that region id been set before"); + return ret; + } + + memset(&ctxt, 0, sizeof(struct i40e_vsi_context)); + + /* Update Queue Pairs Mapping for currently enabled UPs */ + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + vsi_info = &ctxt.info; + + memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8); + memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16); + + /* Configure queue region and queue mapping parameters, + * for enabled queue region, allocate queues to this region. + */ + + for (i = 0; i < region_info->queue_region_number; i++) { + tc_index = region_info->region[i].region_id; + bsf = rte_bsf32(region_info->region[i].queue_num); + queue_offset = region_info->region[i].queue_start_index; + vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16( + (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + } + + /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */ + vsi_info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG); + vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + vsi_info->valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + + /* Update the VSI after updating the VSI queue-mapping information */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ", + hw->aq.asq_last_status); + return ret; + } + /* update the local VSI info with updated queue map */ + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + rte_memcpy(&vsi->info.queue_mapping, + &ctxt.info.queue_mapping, + sizeof(vsi->info.queue_mapping)); + vsi->info.mapping_flags = ctxt.info.mapping_flags; + vsi->info.valid_sections = 0; + + return 0; +} + + +static int +i40e_queue_region_set_region(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *conf_ptr) +{ + uint16_t i; + struct i40e_vsi *main_vsi = pf->main_vsi; + struct i40e_queue_regions *info = &pf->queue_region; + int32_t ret = -EINVAL; + + if (!((rte_is_power_of_2(conf_ptr->queue_num)) && + conf_ptr->queue_num <= 64)) { + PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); + return ret; + } + + if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if ((conf_ptr->queue_start_index + conf_ptr->queue_num) + > main_vsi->nb_used_qps) { + PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) + if (conf_ptr->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number && + i <= I40E_REGION_MAX_INDEX) { + info->region[i].region_id = conf_ptr->region_id; + info->region[i].queue_num = conf_ptr->queue_num; + info->region[i].queue_start_index = + conf_ptr->queue_start_index; + info->queue_region_number++; + } else { + PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before"); + return ret; + 
} + + return 0; +} + +static int +i40e_queue_region_set_flowtype(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *rss_region_conf) +{ + int32_t ret = -EINVAL; + struct i40e_queue_regions *info = &pf->queue_region; + uint16_t i, j; + uint16_t region_index, flowtype_index; + + /* For the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. + */ + + if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) { + PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63"); + return ret; + } + + + for (i = 0; i < info->queue_region_number; i++) + if (rss_region_conf->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number) { + PMD_DRV_LOG(ERR, "that region id has not been set before"); + ret = -EINVAL; + return ret; + } + region_index = i; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].flowtype_num; j++) { + if (rss_region_conf->hw_flowtype == + info->region[i].hw_flowtype[j]) { + PMD_DRV_LOG(ERR, "that hw_flowtype has been set before"); + return 0; + } + } + } + + flowtype_index = info->region[region_index].flowtype_num; + info->region[region_index].hw_flowtype[flowtype_index] = + rss_region_conf->hw_flowtype; + info->region[region_index].flowtype_num++; + + return 0; +} + +static void +i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + uint8_t hw_flowtype; + uint32_t pfqf_hregion; + uint16_t i, j, index; + struct i40e_queue_regions *info = &pf->queue_region; + + /* For the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. 
+ */ + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].flowtype_num; j++) { + hw_flowtype = info->region[i].hw_flowtype[j]; + index = hw_flowtype >> 3; + pfqf_hregion = + i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index)); + + if ((hw_flowtype & 0x7) == 0) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_0_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT; + } else if ((hw_flowtype & 0x7) == 1) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_1_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT; + } else if ((hw_flowtype & 0x7) == 2) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_2_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT; + } else if ((hw_flowtype & 0x7) == 3) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_3_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT; + } else if ((hw_flowtype & 0x7) == 4) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_4_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT; + } else if ((hw_flowtype & 0x7) == 5) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_5_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT; + } else if ((hw_flowtype & 0x7) == 6) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_6_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT; + } else { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_7_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT; + } + + i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index), + pfqf_hregion); + } + } +} + +static int +i40e_queue_region_set_user_priority(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *rss_region_conf) +{ + struct i40e_queue_regions *info = &pf->queue_region; + int32_t ret = -EINVAL; + uint16_t i, j, region_index; + + if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the region_id max index is 7"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) + if (rss_region_conf->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number) { + PMD_DRV_LOG(ERR, "that region id has not been set before"); + ret = -EINVAL; + return ret; + } + + region_index = i; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].user_priority_num; j++) { + if (info->region[i].user_priority[j] == + rss_region_conf->user_priority) { + PMD_DRV_LOG(ERR, "that user priority has been set before"); + return 0; + } + } + } + + j = info->region[region_index].user_priority_num; + info->region[region_index].user_priority[j] = + rss_region_conf->user_priority; + info->region[region_index].user_priority_num++; + + return 0; +} + +static int +i40e_queue_region_dcb_configure(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + struct i40e_dcbx_config dcb_cfg_local; + struct i40e_dcbx_config *dcb_cfg; + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config; + int32_t ret = -EINVAL; + uint16_t i, j, prio_index, region_index; + uint8_t tc_map, tc_bw, bw_lf; + + if (!info->queue_region_number) { + PMD_DRV_LOG(ERR, "No queue 
region been set before"); + return ret; + } + + dcb_cfg = &dcb_cfg_local; + memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config)); + + /* assume each tc has the same bw */ + tc_bw = I40E_MAX_PERCENT / info->queue_region_number; + for (i = 0; i < info->queue_region_number; i++) + dcb_cfg->etscfg.tcbwtable[i] = tc_bw; + /* to ensure the sum of tcbw is equal to 100 */ + bw_lf = I40E_MAX_PERCENT % info->queue_region_number; + for (i = 0; i < bw_lf; i++) + dcb_cfg->etscfg.tcbwtable[i]++; + + /* assume each tc has the same Transmission Selection Algorithm */ + for (i = 0; i < info->queue_region_number; i++) + dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].user_priority_num; j++) { + prio_index = info->region[i].user_priority[j]; + region_index = info->region[i].region_id; + dcb_cfg->etscfg.prioritytable[prio_index] = + region_index; + } + } + + /* FW needs one App to configure HW */ + dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM; + dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO; + dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t); + + dcb_cfg->pfc.willing = 0; + dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + dcb_cfg->pfc.pfcenable = tc_map; + + /* Copy the new config to the current config */ + *old_cfg = *dcb_cfg; + old_cfg->etsrec = old_cfg->etscfg; + ret = i40e_set_dcb_config(hw); + + if (ret) { + PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + + return 0; +} + +int +i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, + struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on) +{ + int32_t ret = -EINVAL; + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_vsi *main_vsi = pf->main_vsi; + + if (on) { + i40e_queue_region_pf_flowtype_conf(hw, pf); + + ret = i40e_vsi_update_queue_region_mapping(hw, pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); + return ret; + } + + ret = i40e_queue_region_dcb_configure(hw, pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush dcb."); + return ret; + } + + return 0; + } + + info->queue_region_number = 1; + info->region[0].queue_num = main_vsi->nb_used_qps; + info->region[0].queue_start_index = 0; + + ret = i40e_vsi_update_queue_region_mapping(hw, pf); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); + + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + + i40e_init_queue_region_conf(dev); + + return 0; +} + +static int +i40e_queue_region_pf_check_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + + if (!hena) + return -ENOTSUP; + + return 0; +} + +static int +i40e_queue_region_get_all_info(struct i40e_pf *pf, + struct i40e_queue_regions *regions_ptr) +{ + struct i40e_queue_regions *info = &pf->queue_region; + + rte_memcpy(regions_ptr, info, + sizeof(struct i40e_queue_regions)); + + return 0; +} + +int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id, + enum rte_pmd_i40e_queue_region_op op_type, void *arg) +{ + struct rte_eth_dev *dev = 
&rte_eth_devices[port_id]; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int32_t ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (!(!i40e_queue_region_pf_check_rss(pf))) + return -ENOTSUP; + + /* This queue region feature only support pf by now. It should + * be called after dev_start, and will be clear after dev_stop. + * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON" + * is just an enable function which server for other configuration, + * it is for all configuration about queue region from up layer, + * at first will only keep in DPDK softwarestored in driver, + * only after "FLUSH_ON", it commit all configuration to HW. + * Because PMD had to set hardware configuration at a time, so + * it will record all up layer command at first. + * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is + * just clean all configuration about queue region just now, + * and restore all to DPDK i40e driver default + * config when start up. + */ + + switch (op_type) { + case RTE_PMD_I40E_RSS_QUEUE_REGION_SET: + ret = i40e_queue_region_set_region(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET: + ret = i40e_queue_region_set_flowtype(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET: + ret = i40e_queue_region_set_user_priority(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON: + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF: + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET: + ret = i40e_queue_region_get_all_info(pf, + (struct i40e_queue_regions *)arg); + break; + default: + PMD_DRV_LOG(WARNING, "op type (%d) not supported", + op_type); + ret = -EINVAL; + } + + I40E_WRITE_FLUSH(hw); + + return ret; +} + +int rte_pmd_i40e_flow_add_del_packet_template( + uint16_t port, + const struct rte_pmd_i40e_pkt_template_conf *conf, + uint8_t add) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port]; + struct i40e_fdir_filter_conf filter_conf; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + memset(&filter_conf, 0, sizeof(filter_conf)); + filter_conf.soft_id = conf->soft_id; + filter_conf.input.flow.raw_flow.pctype = conf->input.pctype; + filter_conf.input.flow.raw_flow.packet = conf->input.packet; + filter_conf.input.flow.raw_flow.length = conf->input.length; + filter_conf.input.flow_ext.pkt_template = true; + + filter_conf.action.rx_queue = conf->action.rx_queue; + filter_conf.action.behavior = + (enum i40e_fdir_behavior)conf->action.behavior; + filter_conf.action.report_status = + (enum i40e_fdir_status)conf->action.report_status; + filter_conf.action.flex_off = conf->action.flex_off; + + return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add); +} diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h index 356fa89d..580ca4ae 100644 --- a/drivers/net/i40e/rte_pmd_i40e.h +++ b/drivers/net/i40e/rte_pmd_i40e.h @@ -88,10 +88,48 @@ enum rte_pmd_i40e_package_info { RTE_PMD_I40E_PKG_INFO_HEADER, RTE_PMD_I40E_PKG_INFO_DEVID_NUM, RTE_PMD_I40E_PKG_INFO_DEVID_LIST, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST, + 
+	RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM,
+	RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST,
+	RTE_PMD_I40E_PKG_INFO_PTYPE_NUM,
+	RTE_PMD_I40E_PKG_INFO_PTYPE_LIST,
 	RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF
 };
 
-#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+/**
+ * Option types of queue region.
+ */
+enum rte_pmd_i40e_queue_region_op {
+	RTE_PMD_I40E_RSS_QUEUE_REGION_UNDEFINED,
+	/** add a queue region set */
+	RTE_PMD_I40E_RSS_QUEUE_REGION_SET,
+	/** add a PF region pctype set */
+	RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET,
+	/** add a queue region user priority set */
+	RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET,
+	/**
+	 * All queue region configuration from the upper layer is at
+	 * first only kept in DPDK software stored in the driver; only
+	 * after "FLUSH_ON" is it all committed to HW.
+	 * Because the PMD has to program the hardware configuration in
+	 * one shot, it records all upper-layer commands first.
+	 */
+	RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON,
+	/**
+	 * "FLUSH_OFF" just clears all current queue region
+	 * configuration and restores the DPDK i40e driver default
+	 * config used at start up.
+	 */
+	RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF,
+	RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET,
+	RTE_PMD_I40E_RSS_QUEUE_REGION_OP_MAX
+};
+
+#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+#define RTE_PMD_I40E_PCTYPE_MAX 64
+#define RTE_PMD_I40E_REGION_MAX_NUM 8
+#define RTE_PMD_I40E_MAX_USER_PRIORITY 8
 
 /**
  * Version for dynamic device personalization.
@@ -133,6 +171,25 @@ struct rte_pmd_i40e_profile_list {
 	struct rte_pmd_i40e_profile_info p_info[1];
 };
 
+#define RTE_PMD_I40E_PROTO_NUM 6
+#define RTE_PMD_I40E_PROTO_UNUSED 0xFF
+
+/**
+ * Protocol information stored in the profile
+ */
+struct rte_pmd_i40e_proto_info {
+	uint8_t proto_id;
+	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
+};
+
+/**
+ * Packet classification / packet type information stored in the profile
+ */
+struct rte_pmd_i40e_ptype_info {
+	uint8_t ptype_id;
+	uint8_t protocols[RTE_PMD_I40E_PROTO_NUM];
+};
+
 /**
  * ptype mapping table only accept RTE_PTYPE_XXX or "user defined" ptype.
  * A ptype with MSB set will be regarded as a user defined ptype.
@@ -146,6 +203,141 @@ struct rte_pmd_i40e_ptype_mapping {
 };
 
 /**
+ * Queue region related information.
+ */
+struct rte_pmd_i40e_queue_region_conf {
+	/** the region id for this configuration */
+	uint8_t region_id;
+	/** the pctype or hardware flowtype of packet,
+	 * the specific index for each type has been defined
+	 * in file i40e_type.h as enum i40e_filter_pctype.
+	 */
+	uint8_t hw_flowtype;
+	/** the start queue index for this region */
+	uint8_t queue_start_index;
+	/** the total queue number of this queue region */
+	uint8_t queue_num;
+	/** the packet's user priority for this region */
+	uint8_t user_priority;
+};
+
+/* queue region info */
+struct rte_pmd_i40e_queue_region_info {
+	/** the region id for this configuration */
+	uint8_t region_id;
+	/** the start queue index for this region */
+	uint8_t queue_start_index;
+	/** the total queue number of this queue region */
+	uint8_t queue_num;
+	/** the total number of user priorities for this region */
+	uint8_t user_priority_num;
+	/** the packet's user priority for this region */
+	uint8_t user_priority[RTE_PMD_I40E_MAX_USER_PRIORITY];
+	/** the total number of flow types for this region */
+	uint8_t flowtype_num;
+	/**
+	 * the pctype or hardware flowtype of packet,
+	 * the specific index for each type has been defined
+	 * in file i40e_type.h as enum i40e_filter_pctype.
+	 */
+	uint8_t hw_flowtype[RTE_PMD_I40E_PCTYPE_MAX];
+};
+
+struct rte_pmd_i40e_queue_regions {
+	/** the total number of queue regions for this port */
+	uint16_t queue_region_number;
+	struct rte_pmd_i40e_queue_region_info
+		region[RTE_PMD_I40E_REGION_MAX_NUM];
+};
+
+/**
+ * Behavior to be taken if the raw packet template is matched.
+ */
+enum rte_pmd_i40e_pkt_template_behavior {
+	RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT,
+	RTE_PMD_I40E_PKT_TEMPLATE_REJECT,
+	RTE_PMD_I40E_PKT_TEMPLATE_PASSTHRU,
+};
+
+/**
+ * Flow Director report status
+ * It defines what will be reported if the raw packet template is matched.
+ */
+enum rte_pmd_i40e_pkt_template_status {
+	/** report nothing */
+	RTE_PMD_I40E_PKT_TEMPLATE_NO_REPORT_STATUS,
+	/** only report FD ID */
+	RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID,
+	/** report FD ID and 4 flex bytes */
+	RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4,
+	/** report 8 flex bytes */
+	RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8,
+};
+
+/**
+ * A structure used to define an action when the raw packet template is matched.
+ */
+struct rte_pmd_i40e_pkt_template_action {
+	/** queue assigned to if the raw packet template matches */
+	uint16_t rx_queue;
+	/** behavior to be taken */
+	enum rte_pmd_i40e_pkt_template_behavior behavior;
+	/** status report option */
+	enum rte_pmd_i40e_pkt_template_status report_status;
+	/**
+	 * If report_status is RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4 or
+	 * RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8, flex_off specifies
+	 * where the reported flex bytes start from in flexible payload.
+	 */
+	uint8_t flex_off;
+};
+
+/**
+ * A structure used to define the input for the raw packet template.
+ */
+struct rte_pmd_i40e_pkt_template_input {
+	/** the pctype used for the raw packet template */
+	uint16_t pctype;
+	/** the buffer containing the raw packet template */
+	void *packet;
+	/** the length of the buffer with the raw packet template */
+	uint32_t length;
+};
+
+/**
+ * A structure used to define the configuration parameters
+ * for the raw packet template.
+ */
+struct rte_pmd_i40e_pkt_template_conf {
+	/** the input for the raw packet template. */
+	struct rte_pmd_i40e_pkt_template_input input;
+	/** the action to be taken when the raw packet template is matched */
+	struct rte_pmd_i40e_pkt_template_action action;
+	/** ID, a unique software index for the raw packet template filter */
+	uint32_t soft_id;
+};
+
+/**
+ * Add or remove a raw packet template filter to/from the Flow Director.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param conf
+ *   Specifies the configuration parameters of the raw packet template filter.
+ * @param add
+ *   Specifies the action to be taken: add or remove the raw packet template filter.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if *conf* invalid.
+ *   - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_flow_add_del_packet_template(
+			uint16_t port,
+			const struct rte_pmd_i40e_pkt_template_conf *conf,
+			uint8_t add);
+
+/**
  * Notify VF when PF link status changes.
  *
  * @param port
@@ -157,7 +349,7 @@ struct rte_pmd_i40e_ptype_mapping {
  * - (-ENODEV) if *port* invalid.
  * - (-EINVAL) if *vf* invalid.
  */
-int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
+int rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf);
 
 /**
  * Enable/Disable VF MAC anti spoofing.
@@ -174,7 +366,7 @@ int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
  * - (-ENODEV) if *port* invalid.
  * - (-EINVAL) if bad parameter.
*/ -int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, +int rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on); @@ -193,7 +385,7 @@ int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, +int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on); @@ -210,7 +402,7 @@ int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_set_tx_loopback(uint8_t port, +int rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on); /** @@ -228,7 +420,7 @@ int rte_pmd_i40e_set_tx_loopback(uint8_t port, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, +int rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on); @@ -247,7 +439,7 @@ int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, +int rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on); @@ -271,7 +463,7 @@ int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if *vf* or *mac_addr* is invalid. */ -int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, +int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, struct ether_addr *mac_addr); /** @@ -291,7 +483,7 @@ int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, * - (-EINVAL) if bad parameter. */ int -rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); +rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on); /** * Enable/Disable vf vlan insert @@ -309,7 +501,7 @@ rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, +int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id, uint16_t vlan_id); /** @@ -328,7 +520,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, +int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id, uint8_t on); /** @@ -347,7 +539,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on); +int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on); /** * Enable/Disable VF VLAN filter @@ -368,7 +560,7 @@ int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on); * - (-EINVAL) if bad parameter. * - (-ENOTSUP) not supported by firmware. */ -int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, +int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id, uint64_t vf_mask, uint8_t on); /** @@ -393,7 +585,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_get_vf_stats(uint8_t port, +int rte_pmd_i40e_get_vf_stats(uint16_t port, uint16_t vf_id, struct rte_eth_stats *stats); @@ -409,7 +601,7 @@ int rte_pmd_i40e_get_vf_stats(uint8_t port, * - (-ENODEV) if *port* invalid. 
* - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_reset_vf_stats(uint8_t port, +int rte_pmd_i40e_reset_vf_stats(uint16_t port, uint16_t vf_id); /** @@ -434,7 +626,7 @@ int rte_pmd_i40e_reset_vf_stats(uint8_t port, * - (-EINVAL) if bad parameter. * - (-ENOTSUP) not supported by firmware. */ -int rte_pmd_i40e_set_vf_max_bw(uint8_t port, +int rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw); @@ -459,7 +651,7 @@ int rte_pmd_i40e_set_vf_max_bw(uint8_t port, * - (-EINVAL) if bad parameter. * - (-ENOTSUP) not supported by firmware. */ -int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, +int rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id, uint8_t tc_num, uint8_t *bw_weight); @@ -484,7 +676,7 @@ int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, * - (-EINVAL) if bad parameter. * - (-ENOTSUP) not supported by firmware. */ -int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, +int rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id, uint8_t tc_no, uint32_t bw); @@ -502,7 +694,7 @@ int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, * - (-EINVAL) if bad parameter. * - (-ENOTSUP) not supported by firmware. */ -int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map); +int rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map); /** * Load/Unload a ddp package @@ -523,7 +715,7 @@ int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map); * - (-EACCES) if profile does not exist. * - (-ENOTSUP) if operation not supported. */ -int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, +int rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, uint32_t size, enum rte_pmd_i40e_package_op op); @@ -561,7 +753,7 @@ int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size); +int rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size); /** * Update hardware defined ptype to software defined packet type @@ -581,7 +773,7 @@ int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size); * set other PTYPEs maps to PTYPE_UNKNOWN. */ int rte_pmd_i40e_ptype_mapping_update( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t count, uint8_t exclusive); @@ -593,7 +785,7 @@ int rte_pmd_i40e_ptype_mapping_update( * @param port * pointer to port identifier of the device */ -int rte_pmd_i40e_ptype_mapping_reset(uint8_t port); +int rte_pmd_i40e_ptype_mapping_reset(uint16_t port); /** * Get hardware defined ptype to software defined ptype @@ -612,7 +804,7 @@ int rte_pmd_i40e_ptype_mapping_reset(uint8_t port); * -(!0) only return mapping items which packet_type != RTE_PTYPE_UNKNOWN. */ int rte_pmd_i40e_ptype_mapping_get( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t size, uint16_t *count, @@ -632,9 +824,113 @@ int rte_pmd_i40e_ptype_mapping_get( * @param pkt_type * the new packet type to overwrite */ -int rte_pmd_i40e_ptype_mapping_replace(uint8_t port, +int rte_pmd_i40e_ptype_mapping_replace(uint16_t port, uint32_t target, uint8_t mask, uint32_t pkt_type); +/** + * Add a VF MAC address. + * + * Add more MAC address for VF. The existing MAC addresses + * are still effective. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. 
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
+				 struct ether_addr *mac_addr);
+
+#define RTE_PMD_I40E_PCTYPE_MAX 64
+#define RTE_PMD_I40E_FLOW_TYPE_MAX 64
+
+struct rte_pmd_i40e_flow_type_mapping {
+	uint16_t flow_type; /**< software defined flow type */
+	uint64_t pctype;    /**< hardware defined pctype */
+};
+
+/**
+ * Update hardware defined pctype to software defined flow type
+ * mapping table.
+ *
+ * @param port
+ *    pointer to port identifier of the device.
+ * @param mapping_items
+ *    the base address of the mapping items array.
+ * @param count
+ *    number of mapping items.
+ * @param exclusive
+ *    the flag indicating the pctype mapping update method.
+ *    -(0) only overwrite referred PCTYPE mapping,
+ *	keep other PCTYPEs mapping unchanged.
+ *    -(!0) overwrite referred PCTYPE mapping,
+ *	set other PCTYPEs maps to PCTYPE_INVALID.
+ */
+int rte_pmd_i40e_flow_type_mapping_update(
+			uint16_t port,
+			struct rte_pmd_i40e_flow_type_mapping *mapping_items,
+			uint16_t count,
+			uint8_t exclusive);
+
+/**
+ * Get software defined flow type to hardware defined pctype
+ * mapping items.
+ *
+ * @param port
+ *    pointer to port identifier of the device.
+ * @param mapping_items
+ *    the base address of the array to store returned items.
+ *    array should be allocated by caller with minimum size of
+ *    RTE_PMD_I40E_FLOW_TYPE_MAX items.
+ */
+int rte_pmd_i40e_flow_type_mapping_get(
+			uint16_t port,
+			struct rte_pmd_i40e_flow_type_mapping *mapping_items);
+
+/**
+ * Reset hardware defined pctype to software defined flow type
+ * mapping table to default.
+ *
+ * @param port
+ *    pointer to port identifier of the device
+ */
+int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port);
+
+/**
+ * On the PF, find the VF index based on the VF MAC address
+ *
+ * @param port
+ *    pointer to port identifier of the device
+ * @param vf_mac
+ *    the MAC address of the VF to determine the index of
+ * @return
+ *    The index of the VF if successful.
+ *    -EINVAL: VF MAC address does not exist for this port
+ *    -ENOTSUP: i40e not supported for this port.
+ */
+int rte_pmd_i40e_query_vfid_by_mac(uint16_t port,
+					const struct ether_addr *vf_mac);
+
+/**
+ * Do RSS queue region configuration on the port according to
+ * the given operation type
+ *
+ * @param port_id
+ *    The port identifier of the Ethernet device.
+ * @param op_type
+ *    Queue region operation type
+ * @param arg
+ *    Data specific to the queue region operation type
+ */
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+			enum rte_pmd_i40e_queue_region_op op_type, void *arg);
+
 #endif /* _PMD_I40E_H_ */
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index 20cc9801..ebbd24e0 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -45,3 +45,16 @@ DPDK_17.08 {
 	rte_pmd_i40e_get_ddp_info;
 } DPDK_17.05;
+
+DPDK_17.11 {
+	global:
+
+	rte_pmd_i40e_add_vf_mac_addr;
+	rte_pmd_i40e_flow_add_del_packet_template;
+	rte_pmd_i40e_flow_type_mapping_update;
+	rte_pmd_i40e_flow_type_mapping_get;
+	rte_pmd_i40e_flow_type_mapping_reset;
+	rte_pmd_i40e_query_vfid_by_mac;
+	rte_pmd_i40e_rss_queue_region_conf;
+
+} DPDK_17.08;
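
Editor's note: rte_pmd_i40e_rss_queue_region_conf() introduced above is a staged API: the SET, FLOWTYPE_SET and USER_PRIORITY_SET operations only record state in the driver, and nothing reaches the hardware until ALL_FLUSH_ON. The following minimal sketch (not part of the patch) shows how an application might drive it after dev_start; the port, region id, queue range and PCTYPE index 31 (assumed to be I40E_FILTER_PCTYPE_NONF_IPV4_TCP) are illustrative values only.

#include <string.h>
#include <rte_pmd_i40e.h>

/* Sketch: map one hardware flow type to a 4-queue region and commit. */
static int
setup_one_queue_region(uint16_t port_id)
{
	struct rte_pmd_i40e_queue_region_conf conf;
	int ret;

	/* 1. Define region 0 as queues 0..3 (recorded in software only). */
	memset(&conf, 0, sizeof(conf));
	conf.region_id = 0;
	conf.queue_start_index = 0;
	conf.queue_num = 4;
	ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
	if (ret)
		return ret;

	/* 2. Steer a hardware flow type (PCTYPE) into region 0. */
	memset(&conf, 0, sizeof(conf));
	conf.region_id = 0;
	conf.hw_flowtype = 31; /* assumed PCTYPE index, see i40e_type.h */
	ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET, &conf);
	if (ret)
		return ret;

	/* 3. Commit everything recorded so far to the hardware. */
	return rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON, NULL);
}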
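
Likewise, a hedged sketch of the new raw packet template Flow Director API (rte_pmd_i40e_flow_add_del_packet_template): the template buffer, soft_id, queue number and PCTYPE below are placeholders chosen for illustration, not values taken from this patch.

#include <string.h>
#include <rte_pmd_i40e.h>

/* Sketch: direct packets matching a raw template buffer to RX queue 2. */
static int
add_raw_template_filter(uint16_t port_id, void *tmpl, uint32_t tmpl_len)
{
	struct rte_pmd_i40e_pkt_template_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.soft_id = 1;           /* unique software index for this filter */
	conf.input.pctype = 31;     /* assumed PCTYPE index, see i40e_type.h */
	conf.input.packet = tmpl;   /* caller-provided raw packet template */
	conf.input.length = tmpl_len;
	conf.action.rx_queue = 2;
	conf.action.behavior = RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT;
	conf.action.report_status = RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID;

	/* add = 1 installs the filter; add = 0 would remove it again. */
	return rte_pmd_i40e_flow_add_del_packet_template(port_id, &conf, 1);
}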