author    | Luca Boccassi <luca.boccassi@gmail.com> | 2017-11-08 14:15:11 +0000
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2017-11-08 14:45:54 +0000
commit    | 055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree      | 8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/net/ixgbe
parent    | f239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r-- | drivers/net/ixgbe/Makefile              |  11
-rw-r--r-- | drivers/net/ixgbe/base/ixgbe_osdep.h    |   8
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ethdev.c        | 208
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ethdev.h        |  55
-rw-r--r-- | drivers/net/ixgbe/ixgbe_fdir.c          |   5
-rw-r--r-- | drivers/net/ixgbe/ixgbe_flow.c          | 179
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ipsec.c         | 737
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ipsec.h         | 151
-rw-r--r-- | drivers/net/ixgbe/ixgbe_pf.c            |  61
-rw-r--r-- | drivers/net/ixgbe/ixgbe_rxtx.c          | 141
-rw-r--r-- | drivers/net/ixgbe/ixgbe_rxtx.h          |  19
-rw-r--r-- | drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c |  10
-rw-r--r-- | drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c  |  53
-rw-r--r-- | drivers/net/ixgbe/ixgbe_tm.c            |  97
-rw-r--r-- | drivers/net/ixgbe/rte_pmd_ixgbe.c       |  60
-rw-r--r-- | drivers/net/ixgbe/rte_pmd_ixgbe.h       |  64
16 files changed, 1607 insertions, 252 deletions
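Editor's note: the headline change in this ixgbe update is inline IPsec (ESP) crypto offload exposed through the then-new rte_security API (ixgbe_ipsec.c/.h below), alongside dev_reset support, int-returning stats/VLAN-offload ops, VF interrupt-vector fixes and per-VF DCB info. As orientation only — this sketch is not part of the commit — here is roughly how an application of that era might create an inline-crypto ESP session on an ixgbe port. The port setup, the sess_pool mempool, the key material and the SPI are illustrative assumptions:

#include <rte_ethdev.h>
#include <rte_cryptodev.h>
#include <rte_security.h>

/* Hypothetical 16-byte AES-GCM key; the ixgbe session code in this commit
 * reads a 4-byte salt from key.data[key.length], hence the 20-byte buffer. */
static uint8_t sa_key[20];

static struct rte_security_session *
create_inline_esp_session(uint16_t port_id, struct rte_mempool *sess_pool)
{
	/* Security context registered by eth_ixgbe_dev_init() below */
	struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_crypto_sym_xform aead_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_DECRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = sa_key, .length = 16 },
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 5,	/* illustrative SPI */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
		},
		.crypto_xform = &aead_xform,
	};

	if (sec_ctx == NULL)
		return NULL;	/* PMD built without RTE_LIBRTE_SECURITY */
	return rte_security_session_create(sec_ctx, &conf, sess_pool);
}

The ingress session is then bound to traffic via an rte_flow rule carrying an RTE_FLOW_ACTION_TYPE_SECURITY action, which is exactly the special case the commit adds to cons_parse_ntuple_filter() in ixgbe_flow.c; egress is symmetric, with rte_security_set_pkt_metadata() per mbuf backed by ixgbe_crypto_update_mb() below.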
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 5e57cb35..511a64eb 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -41,7 +41,7 @@ CFLAGS += $(WERROR_FLAGS)
 
 EXPORT_MAP := rte_pmd_ixgbe_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
 #
@@ -82,12 +82,15 @@ endif
 endif
 endif
 
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
 #
 # Add extra flags for base driver files (also known as shared code)
 # to disable warnings in them
 #
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
 $(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
 
 VPATH += $(SRCDIR)/base
@@ -118,11 +121,13 @@ SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
 else
 SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
 endif
-
 ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
 endif
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
+endif
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
 
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 4aab278d..bb5dfd2a 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -161,4 +161,12 @@ static inline uint32_t ixgbe_read_addr(volatile void* addr)
 #define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
 	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
 
+#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms) \
+do { \
+	uint32_t cnt = poll_ms; \
+	IXGBE_WRITE_REG(hw, (reg), (val)); \
+	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--)) \
+		rte_delay_ms(1); \
+} while (0)
+
 #endif /* _IXGBE_OS_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 22171d86..ff19a564 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -48,10 +48,10 @@
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
+#include <rte_bus_pci.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
@@ -61,6 +61,9 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -169,13 +172,14 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev);
 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
 				int wait_to_complete);
-static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
 				struct rte_eth_xstat *xstats, unsigned n);
@@ -218,7 +222,7 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
 		int on);
-static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
@@ -265,16 +269,17 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
 				   int wait_to_complete);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
 		struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int  ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
 		uint16_t queue, int on);
-static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
 					    uint16_t queue_id);
@@ -518,6 +523,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.dev_set_link_up    = ixgbe_dev_set_link_up,
 	.dev_set_link_down  = ixgbe_dev_set_link_down,
 	.dev_close            = ixgbe_dev_close,
+	.dev_reset            = ixgbe_dev_reset,
 	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
 	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
 	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
@@ -608,6 +614,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.xstats_reset         = ixgbevf_dev_stats_reset,
 	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
 	.dev_close            = ixgbevf_dev_close,
+	.dev_reset            = ixgbevf_dev_reset,
 	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
 	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
 	.dev_infos_get        = ixgbevf_dev_info_get,
@@ -1163,8 +1170,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 
+#ifdef RTE_LIBRTE_SECURITY
+	/* Initialize security_ctx only for primary process*/
+	eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
+	if (eth_dev->security_ctx == NULL)
+		return -ENOMEM;
+#endif
+
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	/* Vendor and Device ID need to be set before init of shared code */
 	hw->device_id = pci_dev->id.device_id;
@@ -1332,12 +1345,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	/* initialize l2 tunnel filter list & hash */
 	ixgbe_l2_tn_filter_init(eth_dev);
 
-	TAILQ_INIT(&filter_ntuple_list);
-	TAILQ_INIT(&filter_ethertype_list);
-	TAILQ_INIT(&filter_syn_list);
-	TAILQ_INIT(&filter_fdir_list);
-	TAILQ_INIT(&filter_l2_tunnel_list);
-	TAILQ_INIT(&ixgbe_flow_list);
+	/* initialize flow filter lists */
+	ixgbe_filterlist_init();
 
 	/* initialize bandwidth configuration info */
 	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
@@ -1401,6 +1410,10 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	/* Remove all Traffic Manager configuration */
 	ixgbe_tm_conf_uninit(eth_dev);
 
+#ifdef RTE_LIBRTE_SECURITY
+	rte_free(eth_dev->security_ctx);
+#endif
+
 	return 0;
 }
 
@@ -1627,7 +1640,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
@@ -1781,7 +1793,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
 	.id_table = pci_id_ixgbe_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_ixgbe_pci_probe,
 	.remove = eth_ixgbe_pci_remove,
 };
@@ -1803,7 +1816,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_ixgbevf_pmd = {
 	.id_table = pci_id_ixgbevf_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_ixgbevf_pci_probe,
 	.remove = eth_ixgbevf_pci_remove,
 };
@@ -1959,9 +1972,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 	rxq = dev->data->rx_queues[queue];
 
 	if (on)
-		rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 	else
-		rxq->vlan_flags = PKT_RX_VLAN_PKT;
+		rxq->vlan_flags = PKT_RX_VLAN;
 }
 
 static void
@@ -2125,7 +2138,7 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	 */
 }
 
-static void
+static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
@@ -2148,6 +2161,8 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
 	}
+
+	return 0;
 }
 
 static void
@@ -2504,8 +2519,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	 *    - fixed speed: TODO implement
 	 */
 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-		PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
-			     dev->data->port_id);
+		PMD_INIT_LOG(ERR,
+		"Invalid link_speeds for port %u, fix speed not supported",
+				dev->data->port_id);
 		return -EINVAL;
 	}
 
@@ -2568,9 +2584,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK;
-	ixgbe_vlan_offload_set(dev, mask);
+	err = ixgbe_vlan_offload_set(dev, mask);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+		goto error;
+	}
 
 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
@@ -2842,7 +2862,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 }
 
 /*
- * Reest and stop device.
+ * Reset and stop device.
 */
 static void
 ixgbe_dev_close(struct rte_eth_dev *dev)
@@ -2865,6 +2885,32 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 }
 
+/*
+ * Reset PF device.
+ */
+static int
+ixgbe_dev_reset(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	/* When a DPDK PMD PF begin to reset PF port, it should notify all
+	 * its VF to make them align with it. The detailed notification
+	 * mechanism is PMD specific. As to ixgbe PF, it is rather complex.
+	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
+	 */
+	if (dev->data->sriov.active)
+		return -ENOTSUP;
+
+	ret = eth_ixgbe_dev_uninit(dev);
+	if (ret)
+		return ret;
+
+	ret = eth_ixgbe_dev_init(dev);
+
+	return ret;
+}
+
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
@@ -3077,7 +3123,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 /*
  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
  */
-static void
+static int
 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	struct ixgbe_hw *hw =
@@ -3099,7 +3145,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
-		return;
+		return -EINVAL;
 
 	/* Fill out the rte_eth_stats statistics structure */
 	stats->ipackets = total_qprc;
@@ -3130,6 +3176,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	/* Tx Errors */
 	stats->oerrors  = 0;
+	return 0;
 }
 
 static void
@@ -3541,7 +3588,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	return IXGBEVF_NB_XSTATS;
 }
 
-static void
+static int
 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -3550,12 +3597,13 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ixgbevf_update_stats(dev);
 
 	if (stats == NULL)
-		return;
+		return -EINVAL;
 
 	stats->ipackets = hw_stats->vfgprc;
 	stats->ibytes = hw_stats->vfgorc;
 	stats->opackets = hw_stats->vfgptc;
 	stats->obytes = hw_stats->vfgotc;
+	return 0;
 }
 
 static void
@@ -3665,6 +3713,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
+#ifdef RTE_LIBRTE_SECURITY
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
@@ -3927,6 +3980,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 	link.link_status = ETH_LINK_DOWN;
 	link.link_speed = 0;
 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = ETH_LINK_AUTONEG;
 	memset(&old, 0, sizeof(old));
 	rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 
@@ -4993,13 +5047,21 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	/* Set HW strip */
 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK;
-	ixgbevf_vlan_offload_set(dev, mask);
+	err = ixgbevf_vlan_offload_set(dev, mask);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+		ixgbe_dev_clear_queues(dev);
+		return err;
+	}
 
 	ixgbevf_dev_rxtx_start(dev);
 
 	/* check and configure queue intr-vector mapping */
 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
-		intr_vector = dev->data->nb_rx_queues;
+		/* According to datasheet, only vector 0/1/2 can be used,
+		 * now only one vector is used for Rx queue
+		 */
+		intr_vector = 1;
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -1;
 	}
@@ -5016,6 +5078,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	}
 	ixgbevf_configure_msix(dev);
 
+	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
+	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
+	 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
+	 * is not cleared, it will fail when following rte_intr_enable( ) tries
	 * to map Rx queue interrupt to other VFIO vectors.
+	 * So clear uio/vfio intr/evevnfd first to avoid failure.
+	 */
+	rte_intr_disable(intr_handle);
+	rte_intr_enable(intr_handle);
 
 	/* Re-enable interrupt for VF */
@@ -5078,6 +5149,23 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 	ixgbevf_remove_mac_addr(dev, 0);
 }
 
+/*
+ * Reset VF device
+ */
+static int
+ixgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	ret = eth_ixgbevf_dev_uninit(dev);
+	if (ret)
+		return ret;
+
+	ret = eth_ixgbevf_dev_init(dev);
+
+	return ret;
+}
+
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5153,7 +5241,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
 }
 
-static void
+static int
 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
@@ -5168,6 +5256,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		for (i = 0; i < hw->mac.max_rx_queues; i++)
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 	}
+
+	return 0;
 }
 
 int
@@ -5450,13 +5540,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
 	/* write pool mirrror control register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
 				mp_msb);
 	}
 
 	/* write VLAN mirrror control register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
 				mv_msb);
@@ -5509,9 +5599,12 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	uint32_t mask;
 	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t vec = IXGBE_MISC_VEC_ID;
 
 	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-	mask |= (1 << IXGBE_MISC_VEC_ID);
+	if (rte_intr_allow_others(intr_handle))
+		vec = IXGBE_RX_VEC_START;
+	mask |= (1 << vec);
 	RTE_SET_USED(queue_id);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
@@ -5526,9 +5619,14 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 	uint32_t mask;
 	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint32_t vec = IXGBE_MISC_VEC_ID;
 
 	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-	mask &= ~(1 << IXGBE_MISC_VEC_ID);
+	if (rte_intr_allow_others(intr_handle))
+		vec = IXGBE_RX_VEC_START;
+	mask &= ~(1 << vec);
 	RTE_SET_USED(queue_id);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
@@ -5670,6 +5768,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t q_idx;
 	uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+	uint32_t base = IXGBE_MISC_VEC_ID;
 
 	/* Configure VF other cause ivar */
 	ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
@@ -5680,6 +5779,11 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 	if (!rte_intr_dp_is_en(intr_handle))
 		return;
 
+	if (rte_intr_allow_others(intr_handle)) {
+		base = IXGBE_RX_VEC_START;
+		vector_idx = IXGBE_RX_VEC_START;
+	}
+
 	/* Configure all RX queues of VF */
 	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
 		/* Force all queue use vector 0, */
 		ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
 		intr_handle->intr_vec[q_idx] = vector_idx;
+		if (vector_idx < base + intr_handle->nb_efd - 1)
+			vector_idx++;
 	}
 }
 
@@ -6313,7 +6419,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
 			sizeof(struct ixgbe_5tuple_filter), 0);
 		if (filter == NULL)
 			return -ENOMEM;
-		(void)rte_memcpy(&filter->filter_info,
+		rte_memcpy(&filter->filter_info,
 			&filter_5tuple,
 			sizeof(struct ixgbe_5tuple_filter_info));
 		filter->queue = ntuple_filter->queue;
@@ -7153,6 +7259,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	struct ixgbe_dcb_config *dcb_config =
			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
 	struct ixgbe_dcb_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	uint8_t nb_tcs;
 	uint8_t i, j;
 
 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -7160,19 +7268,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	else
 		dcb_info->nb_tcs = 1;
 
+	tc_queue = &dcb_info->tc_queue;
+	nb_tcs = dcb_info->nb_tcs;
+
 	if (dcb_config->vt_mode) { /* vt is enabled*/
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-		for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-			for (j = 0; j < dcb_info->nb_tcs; j++) {
-				dcb_info->tc_queue.tc_rxq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-				dcb_info->tc_queue.tc_txq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			for (j = 0; j < nb_tcs; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < nb_tcs; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
 			}
 		}
 	} else { /* vt is disabled*/
@@ -7529,7 +7649,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	if (!node)
 		return -ENOMEM;
 
-	(void)rte_memcpy(&node->key,
+	rte_memcpy(&node->key,
 		&key,
 		sizeof(struct ixgbe_l2_tn_key));
 	node->pool = l2_tunnel->pool;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index caa50c8b..51ddcfd4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -38,9 +38,13 @@
 #include "base/ixgbe_dcb_82599.h"
 #include "base/ixgbe_dcb_82598.h"
 #include "ixgbe_bypass.h"
+#ifdef RTE_LIBRTE_SECURITY
+#include "ixgbe_ipsec.h"
+#endif
 #include <rte_time.h>
 #include <rte_hash.h>
 #include <rte_pci.h>
+#include <rte_bus_pci.h>
 #include <rte_tm_driver.h>
 
 /* need update link, bit flag */
@@ -364,49 +368,6 @@ struct rte_flow {
 	enum rte_filter_type filter_type;
 	void *rule;
 };
-/* ntuple filter list structure */
-struct ixgbe_ntuple_filter_ele {
-	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
-	struct rte_eth_ntuple_filter filter_info;
-};
-/* ethertype filter list structure */
-struct ixgbe_ethertype_filter_ele {
-	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
-	struct rte_eth_ethertype_filter filter_info;
-};
-/* syn filter list structure */
-struct ixgbe_eth_syn_filter_ele {
-	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
-	struct rte_eth_syn_filter filter_info;
-};
-/* fdir filter list structure */
-struct ixgbe_fdir_rule_ele {
-	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
-	struct ixgbe_fdir_rule filter_info;
-};
-/* l2_tunnel filter list structure */
-struct ixgbe_eth_l2_tunnel_conf_ele {
-	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
-	struct rte_eth_l2_tunnel_conf filter_info;
-};
-/* ixgbe_flow memory list structure */
-struct ixgbe_flow_mem {
-	TAILQ_ENTRY(ixgbe_flow_mem) entries;
-	struct rte_flow *flow;
-};
-
-TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
-struct ixgbe_ntuple_filter_list filter_ntuple_list;
-TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
-struct ixgbe_ethertype_filter_list filter_ethertype_list;
-TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
-struct ixgbe_syn_filter_list filter_syn_list;
-TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
-struct ixgbe_fdir_rule_filter_list filter_fdir_list;
-TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
-struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
-TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
-struct ixgbe_flow_mem_list ixgbe_flow_list;
 
 /*
  * Statistics counters collected by the MACsec
@@ -529,7 +490,9 @@ struct ixgbe_adapter {
 	struct ixgbe_filter_info    filter;
 	struct ixgbe_l2_tn_info     l2_tn;
 	struct ixgbe_bw_conf        bw_conf;
-
+#ifdef RTE_LIBRTE_SECURITY
+	struct ixgbe_ipsec          ipsec;
+#endif
 	bool rx_bulk_alloc_allowed;
 	bool rx_vec_allowed;
 	struct rte_timecounter      systime_tc;
@@ -586,6 +549,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->tm_conf)
 
+#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
+	(&((struct ixgbe_adapter *)adapter)->ipsec)
+
 /*
  * RX/TX function prototypes
 */
@@ -692,6 +658,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 int
 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void ixgbe_filterlist_init(void);
 void ixgbe_filterlist_flush(void);
 /*
  * Flow director function prototypes
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index eb2d5581..9281dc1a 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -1276,7 +1276,8 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
 	     rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV6) &&
	    (info->mask.src_port_mask != 0 ||
-	     info->mask.dst_port_mask != 0)) {
+	     info->mask.dst_port_mask != 0) &&
+	    rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
 		PMD_DRV_LOG(ERR, "By this device,"
			    " IPv4 is not supported without"
			    " L4 protocol and ports masked!");
@@ -1347,7 +1348,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
				      0);
 		if (!node)
			return -ENOMEM;
-		(void)rte_memcpy(&node->ixgbe_fdir,
+		rte_memcpy(&node->ixgbe_fdir,
			&rule->ixgbe_fdir,
			sizeof(union ixgbe_atr_input));
 		node->fdirflags = fdircmd_flags;
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index d6796088..19c2d479 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -51,7 +51,6 @@
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
@@ -79,6 +78,51 @@
 #define IXGBE_MAX_N_TUPLE_PRIO 7
 #define IXGBE_MAX_FLX_SOURCE_OFF 62
 
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+	struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+	struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+	TAILQ_ENTRY(ixgbe_flow_mem) entries;
+	struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+
+static struct ixgbe_ntuple_filter_list filter_ntuple_list;
+static struct ixgbe_ethertype_filter_list filter_ethertype_list;
+static struct ixgbe_syn_filter_list filter_syn_list;
+static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_flow_mem_list ixgbe_flow_list;
+
 /**
  * Endless loop will never happen with below assumption
  * 1. there is at least one no-void item(END)
@@ -142,6 +186,9 @@ const struct rte_flow_action *next_no_void_action(
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
 */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -181,6 +228,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+#ifdef RTE_LIBRTE_SECURITY
+	/**
+	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		const void *conf = act->conf;
+		/* check if the next not void item is END */
+		act = next_no_void_action(actions, act);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			return -rte_errno;
+		}
+
+		/* get the IP pattern*/
+		item = next_no_void_pattern(pattern, NULL);
+		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (item->last ||
+					item->type == RTE_FLOW_ITEM_TYPE_END) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "IP pattern missing.");
+				return -rte_errno;
+			}
+			item = next_no_void_pattern(pattern, item);
+		}
+
+		filter->proto = IPPROTO_ESP;
+		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	}
+#endif
+
 	/* the first not void item can be MAC or IPv4 */
 	item = next_no_void_pattern(pattern, NULL);
 
@@ -474,6 +558,12 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
+#ifdef RTE_LIBRTE_SECURITY
+	/* ESP flow not really a flow*/
+	if (filter->proto == IPPROTO_ESP)
+		return 0;
+#endif
+
 	/* Ixgbe doesn't support tcp flags. */
 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -1004,7 +1094,7 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
  * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
- * The first not void action should be QUEUE.
+ * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
@@ -1015,7 +1105,8 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
 * item->last should be NULL.
 */
 static int
-cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[],
 			struct rte_eth_l2_tunnel_conf *filter,
@@ -1025,7 +1116,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_e_tag *e_tag_spec;
 	const struct rte_flow_item_e_tag *e_tag_mask;
 	const struct rte_flow_action *act;
-	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_vf *act_vf;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
 	if (!pattern) {
 		rte_flow_error_set(error, EINVAL,
@@ -1133,9 +1225,10 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* check if the first not void action is QUEUE. */
+	/* check if the first not void action is VF or PF. */
 	act = next_no_void_action(actions, NULL);
-	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
+	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
 		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
 		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1143,8 +1236,12 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	act_q = (const struct rte_flow_action_queue *)act->conf;
-	filter->pool = act_q->index;
+	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+		act_vf = (const struct rte_flow_action_vf *)act->conf;
+		filter->pool = act_vf->id;
+	} else {
+		filter->pool = pci_dev->max_vfs;
+	}
 
 	/* check if the next not void item is END */
 	act = next_no_void_action(actions, act);
@@ -1169,8 +1266,10 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
 {
 	int ret = 0;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	uint16_t vf_num;
 
-	ret = cons_parse_l2_tn_filter(attr, pattern,
+	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				actions, l2_tn_filter, error);
 
 	if (hw->mac.type != ixgbe_mac_X550 &&
@@ -1183,7 +1282,9 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
+	vf_num = pci_dev->max_vfs;
+
+	if (l2_tn_filter->pool > vf_num)
 		return -rte_errno;
 
 	return ret;
@@ -2600,6 +2701,17 @@ step_next:
 }
 
 void
+ixgbe_filterlist_init(void)
+{
+	TAILQ_INIT(&filter_ntuple_list);
+	TAILQ_INIT(&filter_ethertype_list);
+	TAILQ_INIT(&filter_syn_list);
+	TAILQ_INIT(&filter_fdir_list);
+	TAILQ_INIT(&filter_l2_tunnel_list);
+	TAILQ_INIT(&ixgbe_flow_list);
+}
+
+void
 ixgbe_filterlist_flush(void)
 {
 	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
@@ -2702,12 +2814,23 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
+
+#ifdef RTE_LIBRTE_SECURITY
+	/* ESP flow not really a flow*/
+	if (ntuple_filter.proto == IPPROTO_ESP)
+		return flow;
+#endif
+
 	if (!ret) {
 		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
 		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
-			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+			if (!ntuple_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
@@ -2729,7 +2852,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
-			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+			if (!ethertype_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
@@ -2749,7 +2876,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
-			(void)rte_memcpy(&syn_filter_ptr->filter_info,
+			if (!syn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
@@ -2809,7 +2940,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 		if (!ret) {
			fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
				sizeof(struct ixgbe_fdir_rule_ele), 0);
-			(void)rte_memcpy(&fdir_rule_ptr->filter_info,
+			if (!fdir_rule_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&fdir_rule_ptr->filter_info,
				&fdir_rule,
				sizeof(struct ixgbe_fdir_rule));
			TAILQ_INSERT_TAIL(&filter_fdir_list,
@@ -2842,7 +2977,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
-			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+			if (!l2_tn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
@@ -2941,7 +3080,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_NTUPLE:
 		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
-		(void)rte_memcpy(&ntuple_filter,
+		rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
 		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
@@ -2954,7 +3093,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_ETHERTYPE:
 		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
-		(void)rte_memcpy(&ethertype_filter,
+		rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
 		ret = ixgbe_add_del_ethertype_filter(dev,
@@ -2968,7 +3107,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_SYN:
 		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
				pmd_flow->rule;
-		(void)rte_memcpy(&syn_filter,
+		rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
 		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
@@ -2980,7 +3119,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 		break;
 	case RTE_ETH_FILTER_FDIR:
 		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
-		(void)rte_memcpy(&fdir_rule,
+		rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
 		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
@@ -2995,7 +3134,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_L2_TUNNEL:
 		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
				pmd_flow->rule;
-		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
 		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
 		if (!ret) {
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 00000000..105da11a
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,737 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_ip.h>
+#include <rte_jhash.h>
+#include <rte_security_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_flow.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_ipsec.h"
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS	5
+
+#define IXGBE_WAIT_RREAD \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_RWRITE \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TREAD \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TWRITE \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+
+#define CMP_IP(a, b) (\
+	(a).ipv6[0] == (b).ipv6[0] && \
+	(a).ipv6[1] == (b).ipv6[1] && \
+	(a).ipv6[2] == (b).ipv6[2] && \
+	(a).ipv6[3] == (b).ipv6[3])
+
+
+static void
+ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int i = 0;
+
+	/* clear Rx IP table*/
+	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+		uint16_t index = i << 3;
+		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+		IXGBE_WAIT_RWRITE;
+	}
+
+	/* clear Rx SPI and Rx/Tx SA tables*/
+	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+		uint32_t index = i << 3;
+		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+		IXGBE_WAIT_TWRITE;
+	}
+}
+
+static int
+ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
+{
+	struct rte_eth_dev *dev = ic_session->dev;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
+			dev->data->dev_private);
+	uint32_t reg_val;
+	int sa_index = -1;
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		int i, ip_index = -1;
+
+		/* Find a match in the IP table*/
+		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+			if (CMP_IP(priv->rx_ip_tbl[i].ip,
+				   ic_session->dst_ip)) {
+				ip_index = i;
+				break;
+			}
+		}
+		/* If no match, find a free entry in the IP table*/
+		if (ip_index < 0) {
+			for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+				if (priv->rx_ip_tbl[i].ref_count == 0) {
+					ip_index = i;
+					break;
+				}
+			}
+		}
+
+		/* Fail if no match and no free entries*/
+		if (ip_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Rx IP table\n");
+			return -1;
+		}
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->rx_sa_tbl[i].used == 0) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no free entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Rx SA table\n");
+			return -1;
+		}
+
+		priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+				ic_session->dst_ip.ipv6[0];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+				ic_session->dst_ip.ipv6[1];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+				ic_session->dst_ip.ipv6[2];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+				ic_session->dst_ip.ipv6[3];
+		priv->rx_ip_tbl[ip_index].ref_count++;
+
+		priv->rx_sa_tbl[sa_index].spi =
+				rte_cpu_to_be_32(ic_session->spi);
+		priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+		priv->rx_sa_tbl[sa_index].key[3] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+		priv->rx_sa_tbl[sa_index].key[2] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+		priv->rx_sa_tbl[sa_index].key[1] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+		priv->rx_sa_tbl[sa_index].key[0] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+		priv->rx_sa_tbl[sa_index].salt =
+				rte_cpu_to_be_32(ic_session->salt);
+		priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+		if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
+			priv->rx_sa_tbl[sa_index].mode |=
+					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+		if (ic_session->dst_ip.type == IPv6)
+			priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+		priv->rx_sa_tbl[sa_index].used = 1;
+
+		/* write IP table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_IP | (ip_index << 3);
+		if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					priv->rx_ip_tbl[ip_index].ip.ipv4);
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
+		}
+		IXGBE_WAIT_RWRITE;
+
+		/* write SPI table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_SPI | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+				priv->rx_sa_tbl[sa_index].spi);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
+				priv->rx_sa_tbl[sa_index].ip_index);
+		IXGBE_WAIT_RWRITE;
+
+		/* write Key table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_KEY | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
+				priv->rx_sa_tbl[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
+				priv->rx_sa_tbl[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
+				priv->rx_sa_tbl[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
+				priv->rx_sa_tbl[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
+				priv->rx_sa_tbl[sa_index].salt);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
+				priv->rx_sa_tbl[sa_index].mode);
+		IXGBE_WAIT_RWRITE;
+
+	} else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
+		int i;
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->tx_sa_tbl[i].used == 0) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no free entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Tx SA table\n");
+			return -1;
+		}
+
+		priv->tx_sa_tbl[sa_index].spi =
+				rte_cpu_to_be_32(ic_session->spi);
+		priv->tx_sa_tbl[sa_index].key[3] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+		priv->tx_sa_tbl[sa_index].key[2] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+		priv->tx_sa_tbl[sa_index].key[1] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+		priv->tx_sa_tbl[sa_index].key[0] =
+				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+		priv->tx_sa_tbl[sa_index].salt =
+				rte_cpu_to_be_32(ic_session->salt);
+
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
+				priv->tx_sa_tbl[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
+				priv->tx_sa_tbl[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
+				priv->tx_sa_tbl[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
+				priv->tx_sa_tbl[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
+				priv->tx_sa_tbl[sa_index].salt);
+		IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[i].used = 1;
+		ic_session->sa_index = sa_index;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+		       struct ixgbe_crypto_session *ic_session)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_ipsec *priv =
+			IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
+	uint32_t reg_val;
+	int sa_index = -1;
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		int i, ip_index = -1;
+
+		/* Find a match in the IP table*/
+		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+			if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+				ip_index = i;
+				break;
+			}
+		}
+
+		/* Fail if no match*/
+		if (ip_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Rx IP table\n");
+			return -1;
+		}
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->rx_sa_tbl[i].spi ==
+				  rte_cpu_to_be_32(ic_session->spi)) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no match*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Rx SA table\n");
+			return -1;
+		}
+
+		/* Disable and clear Rx SPI and key table table entryes*/
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+		IXGBE_WAIT_RWRITE;
+		priv->rx_sa_tbl[sa_index].used = 0;
+
+		/* If last used then clear the IP table entry*/
+		priv->rx_ip_tbl[ip_index].ref_count--;
+		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+			reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
+					(ip_index << 3);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+		}
+	} else { /* session->dir == RTE_CRYPTO_OUTBOUND */
+		int i;
+
+		/* Find a match in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->tx_sa_tbl[i].spi ==
+				  rte_cpu_to_be_32(ic_session->spi)) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no match entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Tx SA table\n");
+			return -1;
+		}
+		reg_val = IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+		IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[sa_index].used = 0;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_create_session(void *device,
+		struct rte_security_session_conf *conf,
+		struct rte_security_session *session,
+		struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct ixgbe_crypto_session *ic_session = NULL;
+	struct rte_crypto_aead_xform *aead_xform;
+	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+	if (rte_mempool_get(mempool, (void **)&ic_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+		return -ENOMEM;
+	}
+
+	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+			conf->crypto_xform->aead.algo !=
+					RTE_CRYPTO_AEAD_AES_GCM) {
+		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+		return -ENOTSUP;
+	}
+	aead_xform = &conf->crypto_xform->aead;
+
+	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+			return -ENOTSUP;
+		}
+	} else {
+		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+			return -ENOTSUP;
+		}
+	}
+
+	ic_session->key = aead_xform->key.data;
+	memcpy(&ic_session->salt,
+	       &aead_xform->key.data[aead_xform->key.length], 4);
+	ic_session->spi = conf->ipsec.spi;
+	ic_session->dev = eth_dev;
+
+	set_sec_session_private_data(session, ic_session);
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+		if (ixgbe_crypto_add_sa(ic_session)) {
+			PMD_DRV_LOG(ERR, "Failed to add SA\n");
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_remove_session(void *device,
+		struct rte_security_session *session)
+{
+	struct rte_eth_dev *eth_dev = device;
+	struct ixgbe_crypto_session *ic_session =
+		(struct ixgbe_crypto_session *)
+		get_sec_session_private_data(session);
+	struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
+
+	if (eth_dev != ic_session->dev) {
+		PMD_DRV_LOG(ERR, "Session not bound to this device\n");
+		return -ENODEV;
+	}
+
+	if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
+		PMD_DRV_LOG(ERR, "Failed to remove session\n");
+		return -EFAULT;
+	}
+
+	rte_mempool_put(mempool, (void *)ic_session);
+
+	return 0;
+}
+
+static inline uint8_t
+ixgbe_crypto_compute_pad_len(struct rte_mbuf *m)
+{
+	if (m->nb_segs == 1) {
+		/* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
+		 * payload padding size is stored at <pkt_len - 18>
+		 */
+		uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
+					rte_pktmbuf_pkt_len(m) -
+					(ESP_TRAILER_SIZE + ESP_ICV_SIZE));
+		return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
+	}
+	return 0;
+}
+
+static int
+ixgbe_crypto_update_mb(void *device __rte_unused,
+		struct rte_security_session *session,
+		       struct rte_mbuf *m, void *params __rte_unused)
+{
+	struct ixgbe_crypto_session *ic_session =
+			get_sec_session_private_data(session);
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+		union ixgbe_crypto_tx_desc_md *mdata =
+			(union ixgbe_crypto_tx_desc_md *)&m->udata64;
+		mdata->enc = 1;
+		mdata->sa_idx = ic_session->sa_index;
+		mdata->pad_len = ixgbe_crypto_compute_pad_len(m);
+	}
+	return 0;
+}
+
+
+static const struct rte_security_capability *
+ixgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+	static const struct rte_cryptodev_capabilities
+	aes_gcm_gmac_crypto_capabilities[] = {
+		{	/* AES GMAC (128-bit) */
+			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+				{.auth = {
+					.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+					.block_size = 16,
+					.key_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.digest_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.iv_size = {
+						.min = 12,
+						.max = 12,
+						.increment = 0
+					}
+				}, }
+			}, }
+		},
+		{	/* AES GCM (128-bit) */
+			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+				{.aead = {
+					.algo = RTE_CRYPTO_AEAD_AES_GCM,
+					.block_size = 16,
+					.key_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.digest_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.aad_size = {
+						.min = 0,
+						.max = 65535,
+						.increment = 1
+					},
+					.iv_size = {
+						.min = 12,
+						.max = 12,
+						.increment = 0
+					}
+				}, }
+			}, }
+		},
+		{
+			.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+			}, }
+		},
+	};
+
+	static const struct rte_security_capability
+	ixgbe_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			{.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { 0 }
+			} },
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			{.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { 0 }
+			} },
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			{.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { 0 }
+			} },
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			{.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { 0 }
+			} },
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	return ixgbe_security_capabilities;
+}
+
+
+int
+ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+
+	/* sanity checks */
+	if (dev->data->dev_conf.rxmode.enable_lro) {
+		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+		return -1;
+	}
+	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+		return -1;
+	}
+
+
+	/* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
+
+	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+	 * hang will occur with heavy traffic.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg = (reg & 0xFFFFFFF0) | 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+		if (reg != 0) {
+			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+			return -1;
+		}
+	}
+	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
+				IXGBE_SECTXCTRL_STORE_FORWARD);
+		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+		if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
+			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+			return -1;
+		}
+	}
+
+	ixgbe_crypto_clear_ipsec_tables(dev);
+
+	return 0;
+}
+
+int
+ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+				      const void *ip_spec,
+				      uint8_t is_ipv6)
+{
+	struct ixgbe_crypto_session *ic_session
+		= get_sec_session_private_data(sess);
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		if (is_ipv6) {
+			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+			ic_session->src_ip.type = IPv6;
+			ic_session->dst_ip.type = IPv6;
+
+			rte_memcpy(ic_session->src_ip.ipv6,
+				   ipv6->hdr.src_addr, 16);
+			rte_memcpy(ic_session->dst_ip.ipv6,
+				   ipv6->hdr.dst_addr, 16);
+		} else {
+			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+			ic_session->src_ip.type = IPv4;
+			ic_session->dst_ip.type = IPv4;
+
+			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+		}
+		return ixgbe_crypto_add_sa(ic_session);
+	}
+
+	return 0;
+}
+
+static struct rte_security_ops ixgbe_security_ops = {
+	.session_create = ixgbe_crypto_create_session,
+	.session_update = NULL,
+	.session_stats_get = NULL,
+	.session_destroy = ixgbe_crypto_remove_session,
+	.set_pkt_metadata = ixgbe_crypto_update_mb,
+	.capabilities_get = ixgbe_crypto_capabilities_get
+};
+
+struct rte_security_ctx *
+ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
+{
+	struct rte_security_ctx *ctx = rte_malloc("rte_security_instances_ops",
+				sizeof(struct rte_security_ctx), 0);
+	if (ctx) {
+		ctx->device = (void *)dev;
+		ctx->ops = &ixgbe_security_ops;
+		ctx->sess_cnt = 0;
+	}
+	return ctx;
+}
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 00000000..fb8fefc8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,151 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 00000000..fb8fefc8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,151 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef IXGBE_IPSEC_H_
+#define IXGBE_IPSEC_H_
+
+#include <rte_security.h>
+
+#define IPSRXIDX_RX_EN 0x00000001
+#define IPSRXIDX_TABLE_IP 0x00000002
+#define IPSRXIDX_TABLE_SPI 0x00000004
+#define IPSRXIDX_TABLE_KEY 0x00000006
+#define IPSRXIDX_WRITE 0x80000000
+#define IPSRXIDX_READ 0x40000000
+#define IPSRXMOD_VALID 0x00000001
+#define IPSRXMOD_PROTO 0x00000004
+#define IPSRXMOD_DECRYPT 0x00000008
+#define IPSRXMOD_IPV6 0x00000010
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+#define IPSEC_MAX_RX_IP_COUNT 128
+#define IPSEC_MAX_SA_COUNT 1024
+
+#define ESP_ICV_SIZE 16
+#define ESP_TRAILER_SIZE 2
+
+enum ixgbe_operation {
+	IXGBE_OP_AUTHENTICATED_ENCRYPTION,
+	IXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum ixgbe_gcm_key {
+	IXGBE_GCM_KEY_128,
+	IXGBE_GCM_KEY_256
+};
+
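The Rx descriptor status space above packs a two-bit error field alongside the SECP ("security processed") bit; the Rx paths later in this patch fold these into PKT_RX_SEC_OFFLOAD / PKT_RX_SEC_OFFLOAD_FAILED. A hypothetical decoder, for illustration only (not part of the driver), shows how the masks compose:

static inline const char *
ipsec_rx_status_str(uint32_t staterr)
{
	if (!(staterr & IXGBE_RXDADV_IPSEC_STATUS_SECP))
		return "not security processed";
	/* The two error bits select one of three error codes; 0 is success. */
	switch (staterr & IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK) {
	case IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL:
		return "invalid protocol";
	case IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH:
		return "invalid length";
	case IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED:
		return "authentication failed";
	default:
		return "ok";
	}
}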
+/**
+ * Generic IP address structure
+ * TODO: Find a better location for this; rte_net.h, possibly.
+ **/
+struct ipaddr {
+	enum ipaddr_type {
+		IPv4,
+		IPv6
+	} type;
+	/**< IP Address Type - IPv4/IPv6 */
+
+	union {
+		uint32_t ipv4;
+		uint32_t ipv6[4];
+	};
+};
+
+/** inline crypto private session structure */
+struct ixgbe_crypto_session {
+	enum ixgbe_operation op;
+	uint8_t *key;
+	uint32_t salt;
+	uint32_t sa_index;
+	uint32_t spi;
+	struct ipaddr src_ip;
+	struct ipaddr dst_ip;
+	struct rte_eth_dev *dev;
+} __rte_cache_aligned;
+
+struct ixgbe_crypto_rx_ip_table {
+	struct ipaddr ip;
+	uint16_t ref_count;
+};
+struct ixgbe_crypto_rx_sa_table {
+	uint32_t spi;
+	uint32_t ip_index;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t mode;
+	uint8_t used;
+};
+
+struct ixgbe_crypto_tx_sa_table {
+	uint32_t spi;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t used;
+};
+
+union ixgbe_crypto_tx_desc_md {
+	uint64_t data;
+	struct {
+		uint32_t sa_idx;	/**< SA table index */
+		uint8_t pad_len;	/**< ICV and ESP trailer length */
+		uint8_t enc;		/**< enable encryption */
+	};
+};
+
+struct ixgbe_ipsec {
+	struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+	struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+	struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
+
+struct rte_security_ctx *
+ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+					  const void *ip_spec,
+					  uint8_t is_ipv6);
+
+
+
+#endif /* IXGBE_IPSEC_H_ */
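To make the Tx metadata layout concrete: the anonymous struct inside ixgbe_crypto_tx_desc_md overlays the 64-bit word carried in the mbuf, so the whole per-packet offload request collapses to a single store. A hypothetical packing helper (illustration only; in the driver this word is filled in by its set_pkt_metadata hook and consumed when building the context descriptor):

static uint64_t
ixgbe_pack_tx_ipsec_md(uint32_t sa_idx, uint8_t pad_len, uint8_t enc)
{
	union ixgbe_crypto_tx_desc_md md = { .data = 0 };

	md.sa_idx = sa_idx;	/* slot in the 1024-entry Tx SA table */
	md.pad_len = pad_len;	/* ICV plus ESP trailer bytes */
	md.enc = enc;		/* non-zero: encrypt, not authenticate-only */
	return md.data;
}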
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index c0d86c76..676e92c7 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -627,6 +627,18 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	struct rte_eth_conf *eth_conf;
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
+	u8 num_tcs;
+	struct ixgbe_hw *hw;
+	u32 vmvir;
+#define IXGBE_VMVIR_VLANA_MASK		0xC0000000
+#define IXGBE_VMVIR_VLAN_VID_MASK	0x00000FFF
+#define IXGBE_VMVIR_VLAN_UP_MASK	0x0000E000
+#define VLAN_PRIO_SHIFT			13
+	u32 vlana;
+	u32 vid;
+	u32 user_priority;

 	/* Verify if the PF supports the mbox APIs version or not */
 	switch (vfinfo[vf].api_version) {
@@ -645,10 +657,51 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	/* Notify VF of default queue */
 	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

-	/*
-	 * FIX ME if it needs fill msgbuf[IXGBE_VF_TRANS_VLAN]
-	 * for VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
-	 */
+	/* Notify VF of number of DCB traffic classes */
+	eth_conf = &dev->data->dev_conf;
+	switch (eth_conf->txmode.mq_mode) {
+	case ETH_MQ_TX_NONE:
+	case ETH_MQ_TX_DCB:
+		RTE_LOG(ERR, PMD, "PF must work with virtualization for VF %u"
+			", but its tx mode = %d\n", vf,
+			eth_conf->txmode.mq_mode);
+		return -1;
+
+	case ETH_MQ_TX_VMDQ_DCB:
+		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
+		case ETH_16_POOLS:
+			num_tcs = ETH_8_TCS;
+			break;
+		case ETH_32_POOLS:
+			num_tcs = ETH_4_TCS;
+			break;
+		default:
+			return -1;
+		}
+		break;
+
+	/* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+	case ETH_MQ_TX_VMDQ_ONLY:
+		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
+		vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
+		user_priority =
+			(vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
+		if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
+		    ((vid != 0) || (user_priority != 0)))
+			num_tcs = 1;
+		else
+			num_tcs = 0;
+		break;
+
+	default:
+		RTE_LOG(ERR, PMD, "PF works with invalid mode = %d\n",
+			eth_conf->txmode.mq_mode);
+		return -1;
+	}
+	msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;

 	return 0;
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 64bff258..012d9ee8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -93,6 +93,7 @@
 		PKT_TX_TCP_SEG |		\
 		PKT_TX_MACSEC |			\
 		PKT_TX_OUTER_IP_CKSUM |		\
+		PKT_TX_SEC_OFFLOAD |		\
 		IXGBE_TX_IEEE1588_TMST)

 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -184,7 +185,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	int i;

 	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
-		buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+		buf_dma_addr = rte_mbuf_data_iova(*pkts);
 		pkt_len = (*pkts)->data_len;

 		/* write data to descriptor */
@@ -207,7 +208,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	uint64_t buf_dma_addr;
 	uint32_t pkt_len;

-	buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+	buf_dma_addr = rte_mbuf_data_iova(*pkts);
 	pkt_len = (*pkts)->data_len;

 	/* write data to descriptor */
@@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 static inline void
 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+		__rte_unused uint64_t *mdata)
 {
 	uint32_t type_tucmd_mlhl;
 	uint32_t mss_l4len_idx = 0;
@@ -479,6 +481,21 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		seqnum_seed |= tx_offload.l2_len
 			       << IXGBE_ADVTXD_TUNNEL_LEN;
 	}
+#ifdef RTE_LIBRTE_SECURITY
+	if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+		union ixgbe_crypto_tx_desc_md *md =
+				(union ixgbe_crypto_tx_desc_md *)mdata;
+		seqnum_seed |=
+			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
+		type_tucmd_mlhl |= md->enc ?
+				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+				 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+		type_tucmd_mlhl |=
+			(md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+		tx_offload_mask.sa_idx |= ~0;
+		tx_offload_mask.sec_pad_len |= ~0;
+	}
+#endif

 	txq->ctx_cache[ctx_idx].flags = ol_flags;
 	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
@@ -657,6 +674,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
 	union ixgbe_tx_offload tx_offload;
+#ifdef RTE_LIBRTE_SECURITY
+	uint8_t use_ipsec;
+#endif

 	tx_offload.data[0] = 0;
 	tx_offload.data[1] = 0;
@@ -684,6 +704,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIBRTE_SECURITY
+		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif

 		/* If hardware offload required */
 		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
@@ -695,6 +718,15 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+#ifdef RTE_LIBRTE_SECURITY
+			if (use_ipsec) {
+				union ixgbe_crypto_tx_desc_md *ipsec_mdata =
+					(union ixgbe_crypto_tx_desc_md *)
+							&tx_pkt->udata64;
+				tx_offload.sa_idx = ipsec_mdata->sa_idx;
+				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+			}
+#endif

 			/* Decide whether a new context descriptor must be
 			 * built, or the existing ctx can be reused.
*/ ctx = what_advctx_update(txq, tx_ol_req, @@ -855,7 +887,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, - tx_offload); + tx_offload, &tx_pkt->udata64); txe->last_id = tx_last; tx_id = txe->next_id; @@ -873,6 +905,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); +#ifdef RTE_LIBRTE_SECURITY + if (use_ipsec) + olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC; +#endif m_seg = tx_pkt; do { @@ -888,7 +924,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * Set up Transmit Data Descriptor. */ slen = m_seg->data_len; - buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + buf_dma_addr = rte_mbuf_data_iova(m_seg); txd->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); txd->read.cmd_type_len = @@ -1447,6 +1483,14 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) pkt_flags |= PKT_RX_EIP_CKSUM_BAD; } +#ifdef RTE_LIBRTE_SECURITY + if (rx_status & IXGBE_RXD_STAT_SECP) { + pkt_flags |= PKT_RX_SEC_OFFLOAD; + if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG) + pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED; + } +#endif + return pkt_flags; } @@ -1589,7 +1633,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf) mb->data_off = RTE_PKTMBUF_HEADROOM; /* populate the descriptors */ - dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb)); + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); rxdp[i].read.hdr_addr = 0; rxdp[i].read.pkt_addr = dma_addr; } @@ -1821,7 +1865,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm = rxe->mbuf; rxe->mbuf = nmb; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->read.hdr_addr = 0; rxdp->read.pkt_addr = dma_addr; @@ -1849,7 +1893,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->port = rxq->port_id; pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + /* Only valid if PKT_RX_VLAN set in pkt_flags */ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); @@ -1940,7 +1984,7 @@ ixgbe_fill_cluster_head_buf( head->port = rxq->port_id; - /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is + /* The vlan_tci field is only valid when PKT_RX_VLAN is * set in the pkt_flags field. */ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); @@ -2115,7 +2159,7 @@ next_desc: if (!bulk_alloc) { __le64 dma = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); /* * Update RX descriptor with the physical address of the * new data buffer of the new allocated mbuf. 
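The hunks above are the consumer side of the Tx offload: ixgbe_set_xmit_ctx() folds the SA index and pad length into the context descriptor, and the data descriptor's olinfo_status gets IXGBE_ADVTXD_POPTS_IPSEC. The producer side lives in the application. A minimal sketch of the per-packet Tx sequence (assuming a session already created with the inline-crypto action; the helper name is illustrative and not part of this patch):

static inline uint16_t
tx_one_protected(uint16_t port_id, uint16_t queue_id,
		 struct rte_security_ctx *sec_ctx,
		 struct rte_security_session *sess,
		 struct rte_mbuf *m)
{
	/* Ask the PMD to stash the SA metadata in the mbuf; for ixgbe
	 * this lands in m->udata64 (ixgbe_crypto_update_mb above). */
	rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
	/* Request hardware security processing for this packet. */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	return rte_eth_tx_burst(port_id, queue_id, &m, 1);
}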
@@ -2364,8 +2408,11 @@ void __attribute__((cold)) ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ - if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) - && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { + if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) && +#ifdef RTE_LIBRTE_SECURITY + !(txq->using_ipsec) && +#endif + (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Using simple tx code path"); dev->tx_pkt_prepare = NULL; #ifdef RTE_IXGBE_INC_VECTOR @@ -2535,6 +2582,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->txq_flags = tx_conf->txq_flags; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; +#ifdef RTE_LIBRTE_SECURITY + txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SECURITY); +#endif /* * Modification to set VFTDT for virtual function if vf is detected @@ -2548,7 +2599,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, else txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx)); - txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring_phys_addr = tz->iova; txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr; /* Allocate software ring */ @@ -2850,7 +2901,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx)); } - rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring_phys_addr = rz->iova; rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr; /* @@ -3517,12 +3568,19 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = ETH_4_TCS; dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_rx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3544,12 +3602,18 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3565,12 +3629,18 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = rx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + 
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3586,12 +3656,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -4112,7 +4188,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) mbuf->port = rxq->port_id; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); rxd = &rxq->rx_ring[i]; rxd->read.hdr_addr = 0; rxd->read.pkt_addr = dma_addr; @@ -4494,6 +4570,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; rxq->rx_using_sse = rx_using_sse; +#ifdef RTE_LIBRTE_SECURITY + rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY); +#endif } } @@ -4981,6 +5061,21 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) ixgbe_setup_loopback_link_82599(hw); +#ifdef RTE_LIBRTE_SECURITY + if ((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY) || + (dev->data->dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SECURITY)) { + ret = ixgbe_crypto_enable_ipsec(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "ixgbe_crypto_enable_ipsec fails with %d.", + ret); + return ret; + } + } +#endif + return 0; } diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index 85feb0bd..cc7c8288 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -138,8 +138,12 @@ struct ixgbe_rx_queue { uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */ uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ - uint16_t rx_using_sse; + uint8_t rx_using_sse; /**< indicates that vector RX is in use */ +#ifdef RTE_LIBRTE_SECURITY + uint8_t using_ipsec; + /**< indicates that IPsec RX feature is in use */ +#endif #ifdef RTE_IXGBE_INC_VECTOR uint16_t rxrearm_nb; /**< number of remaining to be re-armed */ uint16_t rxrearm_start; /**< the idx we start the re-arming from */ @@ -148,7 +152,7 @@ struct ixgbe_rx_queue { uint16_t queue_id; /**< RX queue index. */ uint16_t reg_idx; /**< RX queue register index. */ uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */ - uint8_t port_id; /**< Device port identifier. */ + uint16_t port_id; /**< Device port identifier. */ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ uint8_t rx_deferred_start; /**< not in global dev start. */ @@ -183,6 +187,11 @@ union ixgbe_tx_offload { /* fields for TX offloading of tunnels */ uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */ uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. 
*/ +#ifdef RTE_LIBRTE_SECURITY + /* inline ipsec related*/ + uint64_t sa_idx:8; /**< TX SA database entry index */ + uint64_t sec_pad_len:4; /**< padding length */ +#endif }; }; @@ -237,7 +246,7 @@ struct ixgbe_tx_queue { uint16_t tx_next_rs; /**< next desc to set RS bit */ uint16_t queue_id; /**< TX queue index. */ uint16_t reg_idx; /**< TX queue register index. */ - uint8_t port_id; /**< Device port identifier. */ + uint16_t port_id; /**< Device port identifier. */ uint8_t pthresh; /**< Prefetch threshold register. */ uint8_t hthresh; /**< Host threshold register. */ uint8_t wthresh; /**< Write-back threshold reg. */ @@ -247,6 +256,10 @@ struct ixgbe_tx_queue { struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; const struct ixgbe_txq_ops *ops; /**< txq ops */ uint8_t tx_deferred_start; /**< not in global dev start. */ +#ifdef RTE_LIBRTE_SECURITY + uint8_t using_ipsec; + /**< indicates that IPsec TX feature is in use */ +#endif }; struct ixgbe_txq_ops { diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c index 44de1caa..2e87ffa0 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c @@ -87,13 +87,13 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) * Data to be rearmed is 6 bytes long. */ vst1_u8((uint8_t *)&mb0->rearm_data, p); - paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM; + paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM; dma_addr0 = vsetq_lane_u64(paddr, zero, 0); /* flush desc with pa dma_addr */ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0); vst1_u8((uint8_t *)&mb1->rearm_data, p); - paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM; + paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM; dma_addr1 = vsetq_lane_u64(paddr, zero, 0); vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1); } @@ -126,8 +126,8 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2, } vol; const uint8x16_t pkttype_msk = { - PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, - PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, + PKT_RX_VLAN, PKT_RX_VLAN, + PKT_RX_VLAN, PKT_RX_VLAN, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; @@ -414,7 +414,7 @@ vtx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { uint64x2_t descriptor = { - pkt->buf_physaddr + pkt->data_off, + pkt->buf_iova + pkt->data_off, (uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len}; vst1q_u64((uint64_t *)&txdp->read, descriptor); diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c index e704a7f3..486239ba 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -86,8 +86,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) mb0 = rxep[0].mbuf; mb1 = rxep[1].mbuf; - /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ - RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) != offsetof(struct rte_mbuf, buf_addr) + 8); vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr)); vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr)); @@ -122,6 +122,43 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); } +#ifdef RTE_LIBRTE_SECURITY +static inline void +desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i sterr, rearm, tmp_e, tmp_p; + uint32_t *rearm0 = (uint32_t *)rx_pkts[0]->rearm_data + 2; + uint32_t *rearm1 = (uint32_t *)rx_pkts[1]->rearm_data + 2; + uint32_t *rearm2 = 
(uint32_t *)rx_pkts[2]->rearm_data + 2;
+	uint32_t *rearm3 = (uint32_t *)rx_pkts[3]->rearm_data + 2;
+	const __m128i ipsec_sterr_msk =
+			_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP |
+				IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED);
+	const __m128i ipsec_proc_msk =
+			_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
+	const __m128i ipsec_err_flag =
+			_mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
+				       PKT_RX_SEC_OFFLOAD);
+	const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);
+
+	rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
+	sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
+			      _mm_extract_epi32(descs[2], 2),
+			      _mm_extract_epi32(descs[1], 2),
+			      _mm_extract_epi32(descs[0], 2));
+	sterr = _mm_and_si128(sterr, ipsec_sterr_msk);
+	tmp_e = _mm_cmpeq_epi32(sterr, ipsec_sterr_msk);
+	tmp_p = _mm_cmpeq_epi32(sterr, ipsec_proc_msk);
+	sterr = _mm_or_si128(_mm_and_si128(tmp_e, ipsec_err_flag),
+			     _mm_and_si128(tmp_p, ipsec_proc_flag));
+	rearm = _mm_or_si128(rearm, sterr);
+	*rearm0 = _mm_extract_epi32(rearm, 0);
+	*rearm1 = _mm_extract_epi32(rearm, 1);
+	*rearm2 = _mm_extract_epi32(rearm, 2);
+	*rearm3 = _mm_extract_epi32(rearm, 3);
+}
+#endif
+
 static inline void
 desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
 	struct rte_mbuf **rx_pkts)
@@ -310,6 +347,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	volatile union ixgbe_adv_rx_desc *rxdp;
 	struct ixgbe_rx_entry *sw_ring;
 	uint16_t nb_pkts_recd;
+#ifdef RTE_LIBRTE_SECURITY
+	uint8_t use_ipsec = rxq->using_ipsec;
+#endif
 	int pos;
 	uint64_t var;
 	__m128i shuf_msk;
@@ -397,7 +437,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	sw_ring = &rxq->sw_ring[rxq->rx_tail];

 	/* ensure these 2 flags are in the lower 8 bits */
-	RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
+	RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
 	vlan_flags = rxq->vlan_flags & UINT8_MAX;

 	/* A.
load 4 packet in one loop @@ -473,6 +513,11 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, /* set ol_flags with vlan packet type */ desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]); +#ifdef RTE_LIBRTE_SECURITY + if (unlikely(use_ipsec)) + desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]); +#endif + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); @@ -604,7 +649,7 @@ vtx1(volatile union ixgbe_adv_tx_desc *txdp, { __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len, - pkt->buf_physaddr + pkt->data_off); + pkt->buf_iova + pkt->data_off); _mm_store_si128((__m128i *)&txdp->read, descriptor); } diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c index cdcf45cb..ca4182c9 100644 --- a/drivers/net/ixgbe/ixgbe_tm.c +++ b/drivers/net/ixgbe/ixgbe_tm.c @@ -312,7 +312,7 @@ ixgbe_shaper_profile_add(struct rte_eth_dev *dev, if (!shaper_profile) return -ENOMEM; shaper_profile->shaper_profile_id = shaper_profile_id; - (void)rte_memcpy(&shaper_profile->profile, profile, + rte_memcpy(&shaper_profile->profile, profile, sizeof(struct rte_tm_shaper_params)); TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list, shaper_profile, node); @@ -482,7 +482,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no, } static int -ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id, +ixgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id, uint32_t priority, uint32_t weight, struct rte_tm_node_params *params, struct rte_tm_error *error) @@ -517,8 +517,8 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id, return -EINVAL; } - /* for root node */ - if (parent_node_id == RTE_TM_NODE_ID_NULL) { + /* for non-leaf node */ + if (node_id >= dev->data->nb_tx_queues) { /* check the unsupported parameters */ if (params->nonleaf.wfq_weight_mode) { error->type = @@ -542,7 +542,7 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id, return 0; } - /* for TC or queue node */ + /* for leaf node */ /* check the unsupported parameters */ if (params->leaf.cman) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; @@ -588,7 +588,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX; - struct ixgbe_tm_shaper_profile *shaper_profile; + struct ixgbe_tm_shaper_profile *shaper_profile = NULL; struct ixgbe_tm_node *tm_node; struct ixgbe_tm_node *parent_node; uint8_t nb_tcs; @@ -606,7 +606,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, return -EINVAL; } - ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight, + ret = ixgbe_node_param_check(dev, node_id, priority, weight, params, error); if (ret) return ret; @@ -619,12 +619,15 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, } /* check the shaper profile id */ - shaper_profile = ixgbe_shaper_profile_search(dev, - params->shaper_profile_id); - if (!shaper_profile) { - error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; - error->message = "shaper profile not exist"; - return -EINVAL; + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + shaper_profile = ixgbe_shaper_profile_search( + dev, params->shaper_profile_id); + if (!shaper_profile) { + error->type = + 
RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile does not exist";
+			return -EINVAL;
+		}
 	}

 	/* root node if it does not have a parent */
@@ -657,12 +660,13 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		tm_node->no = 0;
 		tm_node->parent = NULL;
 		tm_node->shaper_profile = shaper_profile;
-		(void)rte_memcpy(&tm_node->params, params,
+		rte_memcpy(&tm_node->params, params,
 				 sizeof(struct rte_tm_node_params));
 		tm_conf->root = tm_node;

 		/* increase the reference counter of the shaper profile */
-		shaper_profile->reference_count++;
+		if (shaper_profile)
+			shaper_profile->reference_count++;

 		return 0;
 	}
@@ -737,7 +741,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
 	tm_node->shaper_profile = shaper_profile;
-	(void)rte_memcpy(&tm_node->params, params,
+	rte_memcpy(&tm_node->params, params,
 			 sizeof(struct rte_tm_node_params));
 	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
 		tm_node->no = parent_node->reference_count;
@@ -753,7 +757,8 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->parent->reference_count++;

 	/* increase the reference counter of the shaper profile */
-	shaper_profile->reference_count++;
+	if (shaper_profile)
+		shaper_profile->reference_count++;

 	return 0;
 }
@@ -801,14 +806,16 @@ ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,

 	/* root node */
 	if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
-		tm_node->shaper_profile->reference_count--;
+		if (tm_node->shaper_profile)
+			tm_node->shaper_profile->reference_count--;
 		rte_free(tm_node);
 		tm_conf->root = NULL;
 		return 0;
 	}

 	/* TC or queue node */
-	tm_node->shaper_profile->reference_count--;
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
 	tm_node->parent->reference_count--;
 	if (node_type == IXGBE_TM_NODE_TYPE_TC) {
 		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
@@ -876,15 +883,34 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
 		cap->n_nodes_max = 1;
 		cap->n_nodes_nonleaf_max = 1;
 		cap->n_nodes_leaf_max = 0;
-		cap->non_leaf_nodes_identical = true;
-		cap->leaf_nodes_identical = true;
+	} else if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+		/* TC */
+		cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		cap->n_nodes_leaf_max = 0;
+	} else {
+		/* queue */
+		cap->n_nodes_max = hw->mac.max_tx_queues;
+		cap->n_nodes_nonleaf_max = 0;
+		cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+	}
+
+	cap->non_leaf_nodes_identical = true;
+	cap->leaf_nodes_identical = true;
+
+	if (level_id != IXGBE_TM_NODE_TYPE_QUEUE) {
 		cap->nonleaf.shaper_private_supported = true;
 		cap->nonleaf.shaper_private_dual_rate_supported = false;
 		cap->nonleaf.shaper_private_rate_min = 0;
 		/* 10Gbps -> 1.25GBps */
 		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
 		cap->nonleaf.shaper_shared_n_max = 0;
-		cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		if (level_id == IXGBE_TM_NODE_TYPE_PORT)
+			cap->nonleaf.sched_n_children_max =
+				IXGBE_DCB_MAX_TRAFFIC_CLASS;
+		else
+			cap->nonleaf.sched_n_children_max =
+				hw->mac.max_tx_queues;
 		cap->nonleaf.sched_sp_n_priorities_max = 1;
 		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
 		cap->nonleaf.sched_wfq_n_groups_max = 0;
@@ -894,21 +920,7 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
 		return 0;
 	}

-	/* TC or queue node */
-	if (level_id == IXGBE_TM_NODE_TYPE_TC) {
-		/* TC */
-		cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
-		cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
-
cap->n_nodes_leaf_max = 0; - cap->non_leaf_nodes_identical = true; - } else { - /* queue */ - cap->n_nodes_max = hw->mac.max_tx_queues; - cap->n_nodes_nonleaf_max = 0; - cap->n_nodes_leaf_max = hw->mac.max_tx_queues; - cap->non_leaf_nodes_identical = true; - } - cap->leaf_nodes_identical = true; + /* queue node */ cap->leaf.shaper_private_supported = true; cap->leaf.shaper_private_dual_rate_supported = false; cap->leaf.shaper_private_rate_min = 0; @@ -998,7 +1010,8 @@ ixgbe_hierarchy_commit(struct rte_eth_dev *dev, goto done; /* not support port max bandwidth yet */ - if (tm_conf->root->shaper_profile->profile.peak.rate) { + if (tm_conf->root->shaper_profile && + tm_conf->root->shaper_profile->profile.peak.rate) { error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; error->message = "no port max bandwidth"; goto fail_clear; @@ -1006,7 +1019,8 @@ ixgbe_hierarchy_commit(struct rte_eth_dev *dev, /* HW not support TC max bandwidth */ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) { - if (tm_node->shaper_profile->profile.peak.rate) { + if (tm_node->shaper_profile && + tm_node->shaper_profile->profile.peak.rate) { error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; error->message = "no TC max bandwidth"; goto fail_clear; @@ -1015,7 +1029,10 @@ ixgbe_hierarchy_commit(struct rte_eth_dev *dev, /* queue max bandwidth */ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) { - bw = tm_node->shaper_profile->profile.peak.rate; + if (tm_node->shaper_profile) + bw = tm_node->shaper_profile->profile.peak.rate; + else + bw = 0; if (bw) { /* interpret Bps to Mbps */ bw = bw * 8 / 1000 / 1000; diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c index 79897ff6..f1273785 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.c +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c @@ -38,7 +38,7 @@ #include "rte_pmd_ixgbe.h" int -rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, +rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf, struct ether_addr *mac_addr) { struct ixgbe_hw *hw; @@ -73,7 +73,7 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, } int -rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf) +rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf) { struct ixgbe_hw *hw; struct ixgbe_vf_info *vfinfo; @@ -105,7 +105,7 @@ rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf) } int -rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) +rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) { struct ixgbe_hw *hw; struct ixgbe_mac_info *mac; @@ -135,7 +135,7 @@ rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) } int -rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) +rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) { struct ixgbe_hw *hw; struct ixgbe_mac_info *mac; @@ -164,7 +164,7 @@ rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) } int -rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id) +rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id) { struct ixgbe_hw *hw; uint32_t ctrl; @@ -200,7 +200,7 @@ rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id) } int -rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on) +rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on) { struct ixgbe_hw *hw; uint32_t ctrl; @@ -230,7 +230,7 @@ rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on) } int -rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on) 
+rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on) { struct ixgbe_hw *hw; uint32_t reg_value; @@ -260,7 +260,7 @@ rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on) } int -rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on) +rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on) { struct ixgbe_hw *hw; uint32_t reg_value; @@ -295,7 +295,7 @@ rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on) } int -rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on) +rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on) { struct rte_eth_dev *dev; struct rte_pci_device *pci_dev; @@ -342,7 +342,7 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on) } int -rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, +rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf, uint16_t rx_mask, uint8_t on) { int val = 0; @@ -389,7 +389,7 @@ rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, } int -rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on) +rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on) { struct rte_eth_dev *dev; struct rte_pci_device *pci_dev; @@ -439,7 +439,7 @@ rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on) } int -rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on) +rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on) { struct rte_eth_dev *dev; struct rte_pci_device *pci_dev; @@ -489,7 +489,7 @@ rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on) } int -rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, +rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on) { struct rte_eth_dev *dev; @@ -524,7 +524,7 @@ rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, } int -rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, +rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk) { struct rte_eth_dev *dev; @@ -540,7 +540,7 @@ rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, } int -rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp) +rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp) { struct ixgbe_hw *hw; struct rte_eth_dev *dev; @@ -623,7 +623,7 @@ rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp) } int -rte_pmd_ixgbe_macsec_disable(uint8_t port) +rte_pmd_ixgbe_macsec_disable(uint16_t port) { struct ixgbe_hw *hw; struct rte_eth_dev *dev; @@ -687,7 +687,7 @@ rte_pmd_ixgbe_macsec_disable(uint8_t port) } int -rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac) +rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac) { struct ixgbe_hw *hw; struct rte_eth_dev *dev; @@ -712,7 +712,7 @@ rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac) } int -rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi) +rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi) { struct ixgbe_hw *hw; struct rte_eth_dev *dev; @@ -738,7 +738,7 @@ rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi) } int -rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an, +rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an, uint32_t pn, uint8_t *key) { struct ixgbe_hw *hw; @@ -794,7 +794,7 @@ rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an, } int -rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an, 
+rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an, uint32_t pn, uint8_t *key) { struct ixgbe_hw *hw; @@ -837,7 +837,7 @@ rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an, } int -rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port, +rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port, uint8_t tc_num, uint8_t *bw_weight) { @@ -911,7 +911,7 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port, #ifdef RTE_LIBRTE_IXGBE_BYPASS int -rte_pmd_ixgbe_bypass_init(uint8_t port_id) +rte_pmd_ixgbe_bypass_init(uint16_t port_id) { struct rte_eth_dev *dev; @@ -926,7 +926,7 @@ rte_pmd_ixgbe_bypass_init(uint8_t port_id) } int -rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state) +rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state) { struct rte_eth_dev *dev; @@ -940,7 +940,7 @@ rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state) } int -rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state) +rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state) { struct rte_eth_dev *dev; @@ -954,7 +954,7 @@ rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state) } int -rte_pmd_ixgbe_bypass_event_show(uint8_t port_id, +rte_pmd_ixgbe_bypass_event_show(uint16_t port_id, uint32_t event, uint32_t *state) { @@ -970,7 +970,7 @@ rte_pmd_ixgbe_bypass_event_show(uint8_t port_id, } int -rte_pmd_ixgbe_bypass_event_store(uint8_t port_id, +rte_pmd_ixgbe_bypass_event_store(uint16_t port_id, uint32_t event, uint32_t state) { @@ -986,7 +986,7 @@ rte_pmd_ixgbe_bypass_event_store(uint8_t port_id, } int -rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout) +rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout) { struct rte_eth_dev *dev; @@ -1000,7 +1000,7 @@ rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout) } int -rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver) +rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver) { struct rte_eth_dev *dev; @@ -1014,7 +1014,7 @@ rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver) } int -rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) +rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout) { struct rte_eth_dev *dev; @@ -1028,7 +1028,7 @@ rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) } int -rte_pmd_ixgbe_bypass_wd_reset(uint8_t port_id) +rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id) { struct rte_eth_dev *dev; diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h index d33c285d..81b18f87 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.h +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h @@ -53,7 +53,7 @@ * - (-ENODEV) if *port* invalid. * - (-EINVAL) if *vf* invalid. */ -int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf); +int rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf); /** * Set the VF MAC address. @@ -69,7 +69,7 @@ int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if *vf* or *mac_addr* is invalid. */ -int rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, +int rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf, struct ether_addr *mac_addr); /** @@ -87,7 +87,8 @@ int rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. 
*/ -int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on); +int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, + uint8_t on); /** * Enable/Disable VF MAC anti spoofing. @@ -104,7 +105,7 @@ int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on); +int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on); /** * Enable/Disable vf vlan insert @@ -122,7 +123,7 @@ int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, +int rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id); /** @@ -139,7 +140,7 @@ int rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on); +int rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on); /** * set all queues drop enable bit @@ -155,7 +156,7 @@ int rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on); +int rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on); /** * set drop enable bit in the VF split rx control register @@ -174,7 +175,7 @@ int rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on); * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on); +int rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on); /** * Enable/Disable vf vlan strip for all queues in a pool @@ -194,7 +195,7 @@ int rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on); * - (-EINVAL) if bad parameter. */ int -rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); +rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on); /** * Enable MACsec offload. @@ -212,7 +213,7 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-ENOTSUP) if hardware doesn't support this feature. */ -int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp); +int rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp); /** * Disable MACsec offload. @@ -224,7 +225,7 @@ int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp); * - (-ENODEV) if *port* invalid. * - (-ENOTSUP) if hardware doesn't support this feature. */ -int rte_pmd_ixgbe_macsec_disable(uint8_t port); +int rte_pmd_ixgbe_macsec_disable(uint16_t port); /** * Configure Tx SC (Secure Connection). @@ -238,7 +239,7 @@ int rte_pmd_ixgbe_macsec_disable(uint8_t port); * - (-ENODEV) if *port* invalid. * - (-ENOTSUP) if hardware doesn't support this feature. */ -int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac); +int rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac); /** * Configure Rx SC (Secure Connection). @@ -254,7 +255,7 @@ int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac); * - (-ENODEV) if *port* invalid. * - (-ENOTSUP) if hardware doesn't support this feature. 
*/ -int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi); +int rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi); /** * Enable Tx SA (Secure Association). @@ -275,7 +276,7 @@ int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi); * - (-ENOTSUP) if hardware doesn't support this feature. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an, +int rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an, uint32_t pn, uint8_t *key); /** @@ -297,7 +298,7 @@ int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an, * - (-ENOTSUP) if hardware doesn't support this feature. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an, +int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an, uint32_t pn, uint8_t *key); /** @@ -323,7 +324,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an, * - (-EINVAL) if bad parameter. */ int -rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on); +rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf, uint16_t rx_mask, + uint8_t on); /** * Enable or disable a VF traffic receive of an Ethernet device. @@ -342,7 +344,7 @@ rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t * - (-EINVAL) if bad parameter. */ int -rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on); +rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on); /** * Enable or disable a VF traffic transmit of the Ethernet device. @@ -361,7 +363,7 @@ rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on); * - (-EINVAL) if bad parameter. */ int -rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on); +rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on); /** * Enable/Disable hardware VF VLAN filtering by an Ethernet device of @@ -383,7 +385,8 @@ rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on); * - (-EINVAL) if bad parameter. */ int -rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on); +rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on); /** * Set the rate limitation for a vf on an Ethernet device. @@ -402,7 +405,8 @@ rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask, * - (-ENODEV) if *port_id* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk); +int rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk); /** * Set all the TCs' bandwidth weight. @@ -423,7 +427,7 @@ int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, * - (-EINVAL) if bad parameter. * - (-ENOTSUP) not supported by firmware. */ -int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port, +int rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port, uint8_t tc_num, uint8_t *bw_weight); @@ -439,7 +443,7 @@ int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port, * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_init(uint8_t port); +int rte_pmd_ixgbe_bypass_init(uint16_t port); /** * Return bypass state. @@ -456,7 +460,7 @@ int rte_pmd_ixgbe_bypass_init(uint8_t port); * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. 
*/ -int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state); +int rte_pmd_ixgbe_bypass_state_show(uint16_t port, uint32_t *state); /** * Set bypass state @@ -473,7 +477,7 @@ int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state); * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state); +int rte_pmd_ixgbe_bypass_state_set(uint16_t port, uint32_t *new_state); /** * Return bypass state when given event occurs. @@ -497,7 +501,7 @@ int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state); * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_event_show(uint8_t port, +int rte_pmd_ixgbe_bypass_event_show(uint16_t port, uint32_t event, uint32_t *state); @@ -523,7 +527,7 @@ int rte_pmd_ixgbe_bypass_event_show(uint8_t port, * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_event_store(uint8_t port, +int rte_pmd_ixgbe_bypass_event_store(uint16_t port, uint32_t event, uint32_t state); @@ -547,7 +551,7 @@ int rte_pmd_ixgbe_bypass_event_store(uint8_t port, * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout); +int rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port, uint32_t timeout); /** * Get bypass firmware version. @@ -561,7 +565,7 @@ int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout); * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver); +int rte_pmd_ixgbe_bypass_ver_show(uint16_t port, uint32_t *ver); /** * Return bypass watchdog timeout in seconds @@ -583,7 +587,7 @@ int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver); * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout); +int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout); /** * Reset bypass watchdog timer @@ -595,7 +599,7 @@ int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout); * - (-ENOTSUP) if hardware doesn't support. * - (-EINVAL) if bad parameter. */ -int rte_pmd_ixgbe_bypass_wd_reset(uint8_t port); +int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port); /** |
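All of the rte_pmd_ixgbe_* prototypes above move from uint8_t to uint16_t port ids as part of this release's port-id widening, so existing callers only need a type change. A minimal sketch (assuming 17.11's rte_eth_dev_count(); the helper name is illustrative):

#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

static void
set_tx_loopback_all(uint8_t on)
{
	uint16_t port;	/* was uint8_t before the API change above */

	for (port = 0; port < rte_eth_dev_count(); port++)
		/* returns -ENODEV/-ENOTSUP for ports that are not ixgbe */
		(void)rte_pmd_ixgbe_set_tx_loopback(port, on);
}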