Diffstat (limited to 'drivers/net/mlx5')
25 files changed, 5923 insertions, 3340 deletions
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index 8a5229e6..2e70dec5 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -33,6 +33,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y) INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE) @@ -56,6 +57,7 @@ LDLIBS += -ldl else LDLIBS += -libverbs -lmlx5 endif +LDLIBS += -lmnl LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -100,7 +102,7 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \ infiniband/mlx5dv.h \ - enum MLX5DV_CONTEXT_MASK_STRIDING_RQ \ + enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_TUNNEL_SUPPORT \ @@ -150,7 +152,237 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \ infiniband/verbs.h \ - enum IBV_FLOW_SPEC_ACTION_COUNT \ + type 'struct ibv_counter_set_init_attr' \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NL_NLDEV \ + rdma/rdma_netlink.h \ + enum RDMA_NL_NLDEV \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_CMD_GET \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_CMD_GET \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_CMD_PORT_GET \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_CMD_PORT_GET \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_DEV_INDEX \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_DEV_NAME \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_DEV_NAME \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_PORT_INDEX \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \ + rdma/rdma_netlink.h \ + enum RDMA_NLDEV_ATTR_NDEV_INDEX \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IFLA_PHYS_SWITCH_ID \ + linux/if_link.h \ + enum IFLA_PHYS_SWITCH_ID \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IFLA_PHYS_PORT_NAME \ + linux/if_link.h \ + enum IFLA_PHYS_PORT_NAME \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_ACT \ + linux/pkt_cls.h \ + enum TCA_FLOWER_ACT \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_FLAGS \ + linux/pkt_cls.h \ + enum TCA_FLOWER_FLAGS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_TYPE \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_TYPE \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_ETH_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IP_PROTO \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IP_PROTO \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + 
HAVE_TCA_FLOWER_KEY_IPV4_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV4_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV4_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_IPV6_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_TCP_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_TCP_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_SRC \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_SRC \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_SRC_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_DST \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_DST \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_UDP_DST_MASK \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_UDP_DST_MASK \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_VLAN_ID \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_VLAN_ID \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_VLAN_PRIO \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_VLAN_PRIO \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE \ + linux/pkt_cls.h \ + enum TCA_FLOWER_KEY_VLAN_ETH_TYPE \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_TC_ACT_VLAN \ + linux/tc_act/tc_vlan.h \ + enum TCA_VLAN_PUSH_VLAN_PRIORITY \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseKR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseKR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseCR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseCR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseSR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseSR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_40000baseLR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_40000baseLR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseKR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseKR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseCR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseCR4_Full \ + 
$(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseSR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseSR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_SUPPORTED_56000baseLR4_Full \ + /usr/include/linux/ethtool.h \ + define SUPPORTED_56000baseLR4_Full \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_STATIC_ASSERT \ + /usr/include/assert.h \ + define static_assert \ $(AUTOCONF_OUTPUT) # Create mlx5_autoconf.h or update it in case it differs from the new one. @@ -177,7 +409,7 @@ endif $(LIB_GLUE): mlx5_glue.o $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \ -Wl,-h,$(LIB_GLUE) \ - -s -shared -o $@ $< -libverbs -lmlx5 + -shared -o $@ $< -libverbs -lmlx5 mlx5_glue.o: mlx5_autoconf.h diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index c933e274..ec63bc6e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -13,6 +13,7 @@ #include <errno.h> #include <net/if.h> #include <sys/mman.h> +#include <linux/netlink.h> #include <linux/rtnetlink.h> /* Verbs header. */ @@ -36,6 +37,7 @@ #include <rte_kvargs.h> #include <rte_rwlock.h> #include <rte_spinlock.h> +#include <rte_string_fns.h> #include "mlx5.h" #include "mlx5_utils.h" @@ -90,6 +92,9 @@ /* Activate Netlink support in VF mode. */ #define MLX5_VF_NL_EN "vf_nl_en" +/* Select port representors to instantiate. */ +#define MLX5_REPRESENTOR "representor" + #ifndef HAVE_IBV_MLX5_MOD_MPW #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) @@ -237,6 +242,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* In case mlx5_dev_stop() has not been called. */ mlx5_dev_interrupt_handler_uninstall(dev); mlx5_traffic_disable(dev); + mlx5_flow_flush(dev, NULL); /* Prevent crashes when queues are still in use. 
*/ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; @@ -256,7 +262,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) priv->txqs_n = 0; priv->txqs = NULL; } - mlx5_flow_delete_drop_queue(dev); mlx5_mprq_free_mp(dev); mlx5_mr_release(dev); if (priv->pd != NULL) { @@ -273,8 +278,12 @@ mlx5_dev_close(struct rte_eth_dev *dev) mlx5_socket_uninit(dev); if (priv->config.vf) mlx5_nl_mac_addr_flush(dev); - if (priv->nl_socket >= 0) - close(priv->nl_socket); + if (priv->nl_socket_route >= 0) + close(priv->nl_socket_route); + if (priv->nl_socket_rdma >= 0) + close(priv->nl_socket_rdma); + if (priv->mnl_socket) + mlx5_nl_flow_socket_destroy(priv->mnl_socket); ret = mlx5_hrxq_ibv_verify(dev); if (ret) DRV_LOG(WARNING, "port %u some hash Rx queue still remain", @@ -303,7 +312,27 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (ret) DRV_LOG(WARNING, "port %u some flows still remain", dev->data->port_id); + if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + unsigned int c = 0; + unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0); + uint16_t port_id[i]; + + i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i); + while (i--) { + struct priv *opriv = + rte_eth_devices[port_id[i]].data->dev_private; + + if (!opriv || + opriv->domain_id != priv->domain_id || + &rte_eth_devices[port_id[i]] == dev) + continue; + ++c; + } + if (!c) + claim_zero(rte_eth_switch_domain_free(priv->domain_id)); + } memset(priv, 0, sizeof(*priv)); + priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; } const struct eth_dev_ops mlx5_dev_ops = { @@ -370,6 +399,10 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .dev_set_link_down = mlx5_set_link_down, .dev_set_link_up = mlx5_set_link_up, .dev_close = mlx5_dev_close, + .promiscuous_enable = mlx5_promiscuous_enable, + .promiscuous_disable = mlx5_promiscuous_disable, + .allmulticast_enable = mlx5_allmulticast_enable, + .allmulticast_disable = mlx5_allmulticast_disable, .link_update = mlx5_link_update, .stats_get = mlx5_stats_get, .stats_reset = mlx5_stats_reset, @@ -400,39 +433,6 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .is_removed = mlx5_is_removed, }; -static struct { - struct rte_pci_addr pci_addr; /* associated PCI address */ - uint32_t ports; /* physical ports bitfield. */ -} mlx5_dev[32]; - -/** - * Get device index in mlx5_dev[] from PCI bus address. - * - * @param[in] pci_addr - * PCI bus address to look for. - * - * @return - * mlx5_dev[] index on success, -1 on failure. - */ -static int -mlx5_dev_idx(struct rte_pci_addr *pci_addr) -{ - unsigned int i; - int ret = -1; - - assert(pci_addr != NULL); - for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) { - if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) && - (mlx5_dev[i].pci_addr.bus == pci_addr->bus) && - (mlx5_dev[i].pci_addr.devid == pci_addr->devid) && - (mlx5_dev[i].pci_addr.function == pci_addr->function)) - return i; - if ((mlx5_dev[i].ports == 0) && (ret == -1)) - ret = i; - } - return ret; -} - /** * Verify and store value for device argument. * @@ -452,6 +452,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque) struct mlx5_dev_config *config = opaque; unsigned long tmp; + /* No-op, port representors are processed in mlx5_dev_spawn(). 
*/ + if (!strcmp(MLX5_REPRESENTOR, key)) + return 0; errno = 0; tmp = strtoul(val, NULL, 0); if (errno) { @@ -524,6 +527,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) MLX5_RX_VEC_EN, MLX5_L3_VXLAN_EN, MLX5_VF_NL_EN, + MLX5_REPRESENTOR, NULL, }; struct rte_kvargs *kvlist; @@ -600,7 +604,7 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev) rte_memseg_walk(find_lower_va_bound, &addr); /* keep distance to hugepages to minimize potential conflicts. */ - addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE); + addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE)); /* anonymous mmap, no real memory consumption. */ addr = mmap(addr, MLX5_UAR_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); @@ -668,133 +672,115 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev) } /** - * DPDK callback to register a PCI device. + * Spawn an Ethernet device from Verbs information. * - * This function creates an Ethernet device for each port of a given - * PCI device. - * - * @param[in] pci_drv - * PCI driver structure (mlx5_driver). - * @param[in] pci_dev - * PCI device information. + * @param dpdk_dev + * Backing DPDK device. + * @param ibv_dev + * Verbs device. + * @param vf + * If nonzero, enable VF-specific features. + * @param[in] switch_info + * Switch properties of Ethernet device. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * A valid Ethernet device object on success, NULL otherwise and rte_errno + * is set. The following error is defined: + * + * EBUSY: device is not supposed to be spawned. */ -static int -mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) +static struct rte_eth_dev * +mlx5_dev_spawn(struct rte_device *dpdk_dev, + struct ibv_device *ibv_dev, + int vf, + const struct mlx5_switch_info *switch_info) { - struct ibv_device **list = NULL; - struct ibv_device *ibv_dev; + struct ibv_context *ctx; + struct ibv_device_attr_ex attr; + struct ibv_port_attr port_attr; + struct ibv_pd *pd = NULL; + struct mlx5dv_context dv_attr = { .comp_mask = 0 }; + struct mlx5_dev_config config = { + .vf = !!vf, + .tx_vec_en = 1, + .rx_vec_en = 1, + .mpw_hdr_dseg = 0, + .txq_inline = MLX5_ARG_UNSET, + .txqs_inline = MLX5_ARG_UNSET, + .inline_max_packet_sz = MLX5_ARG_UNSET, + .vf_nl_en = 1, + .mprq = { + .enabled = 0, + .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N, + .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, + .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, + }, + }; + struct rte_eth_dev *eth_dev = NULL; + struct priv *priv = NULL; int err = 0; - struct ibv_context *attr_ctx = NULL; - struct ibv_device_attr_ex device_attr; - unsigned int vf = 0; unsigned int mps; unsigned int cqe_comp; unsigned int tunnel_en = 0; unsigned int mpls_en = 0; unsigned int swp = 0; - unsigned int verb_priorities = 0; unsigned int mprq = 0; unsigned int mprq_min_stride_size_n = 0; unsigned int mprq_max_stride_size_n = 0; unsigned int mprq_min_stride_num_n = 0; unsigned int mprq_max_stride_num_n = 0; - int idx; - int i; - struct mlx5dv_context attrs_out = {0}; #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - struct ibv_counter_set_description cs_desc; + struct ibv_counter_set_description cs_desc = { .counter_type = 0 }; #endif + struct ether_addr mac; + char name[RTE_ETH_NAME_MAX_LEN]; + int own_domain_id = 0; + unsigned int i; - /* Prepare shared data between primary and secondary process. */ - mlx5_prepare_shared_data(); - assert(pci_drv == &mlx5_driver); - /* Get mlx5_dev[] index. 
*/ - idx = mlx5_dev_idx(&pci_dev->addr); - if (idx == -1) { - DRV_LOG(ERR, "this driver cannot support any more adapters"); - err = ENOMEM; - goto error; - } - DRV_LOG(DEBUG, "using driver device index %d", idx); - /* Save PCI address. */ - mlx5_dev[idx].pci_addr = pci_dev->addr; - list = mlx5_glue->get_device_list(&i); - if (list == NULL) { - assert(errno); - err = errno; - if (errno == ENOSYS) - DRV_LOG(ERR, - "cannot list devices, is ib_uverbs loaded?"); - goto error; - } - assert(i >= 0); - /* - * For each listed device, check related sysfs entry against - * the provided PCI ID. - */ - while (i != 0) { - struct rte_pci_addr pci_addr; + /* Determine if this port representor is supposed to be spawned. */ + if (switch_info->representor && dpdk_dev->devargs) { + struct rte_eth_devargs eth_da; - --i; - DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name); - if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr)) - continue; - if ((pci_dev->addr.domain != pci_addr.domain) || - (pci_dev->addr.bus != pci_addr.bus) || - (pci_dev->addr.devid != pci_addr.devid) || - (pci_dev->addr.function != pci_addr.function)) - continue; - DRV_LOG(INFO, "PCI information matches, using device \"%s\"", - list[i]->name); - vf = ((pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) || - (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)); - attr_ctx = mlx5_glue->open_device(list[i]); - rte_errno = errno; - err = rte_errno; - break; - } - if (attr_ctx == NULL) { - switch (err) { - case 0: - DRV_LOG(ERR, - "cannot access device, is mlx5_ib loaded?"); - err = ENODEV; - break; - case EINVAL: - DRV_LOG(ERR, - "cannot use device, are drivers up to date?"); - break; + err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); + if (err) { + rte_errno = -err; + DRV_LOG(ERR, "failed to process device arguments: %s", + strerror(rte_errno)); + return NULL; } - goto error; + for (i = 0; i < eth_da.nb_representor_ports; ++i) + if (eth_da.representor_ports[i] == + (uint16_t)switch_info->port_name) + break; + if (i == eth_da.nb_representor_ports) { + rte_errno = EBUSY; + return NULL; + } + } + /* Prepare shared data between primary and secondary process. */ + mlx5_prepare_shared_data(); + errno = 0; + ctx = mlx5_glue->open_device(ibv_dev); + if (!ctx) { + rte_errno = errno ? errno : ENODEV; + return NULL; } - ibv_dev = list[i]; - DRV_LOG(DEBUG, "device opened"); #ifdef HAVE_IBV_MLX5_MOD_SWP - attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; #endif /* * Multi-packet send is supported by ConnectX-4 Lx PF as well * as all ConnectX-5 devices. 
*/ #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; #endif #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT - attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; #endif - mlx5_glue->dv_query_device(attr_ctx, &attrs_out); - if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { - if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { + mlx5_glue->dv_query_device(ctx, &dv_attr); + if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { + if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { DRV_LOG(DEBUG, "enhanced MPW is supported"); mps = MLX5_MPW_ENHANCED; } else { @@ -805,15 +791,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, DRV_LOG(DEBUG, "MPW isn't supported"); mps = MLX5_MPW_DISABLED; } + config.mps = mps; #ifdef HAVE_IBV_MLX5_MOD_SWP - if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP) - swp = attrs_out.sw_parsing_caps.sw_parsing_offloads; + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) + swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; DRV_LOG(DEBUG, "SWP support: %u", swp); #endif + config.swp = !!swp; #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT - if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { struct mlx5dv_striding_rq_caps mprq_caps = - attrs_out.striding_rq_caps; + dv_attr.striding_rq_caps; DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", mprq_caps.min_single_stride_log_num_of_bytes); @@ -835,18 +823,21 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, mprq_caps.min_single_wqe_log_num_of_strides; mprq_max_stride_num_n = mprq_caps.max_single_wqe_log_num_of_strides; + config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, + mprq_min_stride_num_n); } #endif if (RTE_CACHE_LINE_SIZE == 128 && - !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) + !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) cqe_comp = 0; else cqe_comp = 1; + config.cqe_comp = cqe_comp; #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { - tunnel_en = ((attrs_out.tunnel_offloads_caps & + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { + tunnel_en = ((dv_attr.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && - (attrs_out.tunnel_offloads_caps & + (dv_attr.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); } DRV_LOG(DEBUG, "tunnel offloading is %ssupported", @@ -855,10 +846,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, DRV_LOG(WARNING, "tunnel offloading disabled due to old OFED/rdma-core version"); #endif + config.tunnel_en = tunnel_en; #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT - mpls_en = ((attrs_out.tunnel_offloads_caps & + mpls_en = ((dv_attr.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && - (attrs_out.tunnel_offloads_caps & + (dv_attr.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", mpls_en ? 
"" : "not "); @@ -866,387 +858,595 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" " old OFED/rdma-core version or firmware configuration"); #endif - err = mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr); + config.mpls_en = mpls_en; + err = mlx5_glue->query_device_ex(ctx, NULL, &attr); if (err) { DEBUG("ibv_query_device_ex() failed"); goto error; } - DRV_LOG(INFO, "%u port(s) detected", - device_attr.orig_attr.phys_port_cnt); - for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { - char name[RTE_ETH_NAME_MAX_LEN]; - int len; - uint32_t port = i + 1; /* ports are indexed from one */ - uint32_t test = (1 << i); - struct ibv_context *ctx = NULL; - struct ibv_port_attr port_attr; - struct ibv_pd *pd = NULL; - struct priv *priv = NULL; - struct rte_eth_dev *eth_dev = NULL; - struct ibv_device_attr_ex device_attr_ex; - struct ether_addr mac; - struct mlx5_dev_config config = { - .cqe_comp = cqe_comp, - .mps = mps, - .tunnel_en = tunnel_en, - .mpls_en = mpls_en, - .tx_vec_en = 1, - .rx_vec_en = 1, - .mpw_hdr_dseg = 0, - .txq_inline = MLX5_ARG_UNSET, - .txqs_inline = MLX5_ARG_UNSET, - .inline_max_packet_sz = MLX5_ARG_UNSET, - .vf_nl_en = 1, - .swp = !!swp, - .mprq = { - .enabled = 0, /* Disabled by default. */ - .stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, - mprq_min_stride_num_n), - .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, - .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, - }, - }; - - len = snprintf(name, sizeof(name), PCI_PRI_FMT, - pci_dev->addr.domain, pci_dev->addr.bus, - pci_dev->addr.devid, pci_dev->addr.function); - if (device_attr.orig_attr.phys_port_cnt > 1) - snprintf(name + len, sizeof(name), " port %u", i); - mlx5_dev[idx].ports |= test; - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - eth_dev = rte_eth_dev_attach_secondary(name); - if (eth_dev == NULL) { - DRV_LOG(ERR, "can not attach rte ethdev"); - rte_errno = ENOMEM; - err = rte_errno; - goto error; - } - eth_dev->device = &pci_dev->device; - eth_dev->dev_ops = &mlx5_dev_sec_ops; - err = mlx5_uar_init_secondary(eth_dev); - if (err) { - err = rte_errno; - goto error; - } - /* Receive command fd from primary process */ - err = mlx5_socket_connect(eth_dev); - if (err < 0) { - err = rte_errno; - goto error; - } - /* Remap UAR for Tx queues. */ - err = mlx5_tx_uar_remap(eth_dev, err); - if (err) { - err = rte_errno; - goto error; - } - /* - * Ethdev pointer is still required as input since - * the primary device is not accessible from the - * secondary process. - */ - eth_dev->rx_pkt_burst = - mlx5_select_rx_function(eth_dev); - eth_dev->tx_pkt_burst = - mlx5_select_tx_function(eth_dev); - rte_eth_dev_probing_finish(eth_dev); - continue; - } - DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test); - ctx = mlx5_glue->open_device(ibv_dev); - if (ctx == NULL) { - err = ENODEV; - goto port_error; + if (!switch_info->representor) + rte_strlcpy(name, dpdk_dev->name, sizeof(name)); + else + snprintf(name, sizeof(name), "%s_representor_%u", + dpdk_dev->name, switch_info->port_name); + DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (eth_dev == NULL) { + DRV_LOG(ERR, "can not attach rte ethdev"); + rte_errno = ENOMEM; + err = rte_errno; + goto error; } - /* Check port status. 
*/ - err = mlx5_glue->query_port(ctx, port, &port_attr); + eth_dev->device = dpdk_dev; + eth_dev->dev_ops = &mlx5_dev_sec_ops; + err = mlx5_uar_init_secondary(eth_dev); if (err) { - DRV_LOG(ERR, "port query failed: %s", strerror(err)); - goto port_error; - } - if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { - DRV_LOG(ERR, - "port %d is not configured in Ethernet mode", - port); - err = EINVAL; - goto port_error; - } - if (port_attr.state != IBV_PORT_ACTIVE) - DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)", - port, - mlx5_glue->port_state_str(port_attr.state), - port_attr.state); - /* Allocate protection domain. */ - pd = mlx5_glue->alloc_pd(ctx); - if (pd == NULL) { - DRV_LOG(ERR, "PD allocation failure"); - err = ENOMEM; - goto port_error; + err = rte_errno; + goto error; } - mlx5_dev[idx].ports |= test; - /* from rte_ethdev.c */ - priv = rte_zmalloc("ethdev private structure", - sizeof(*priv), - RTE_CACHE_LINE_SIZE); - if (priv == NULL) { - DRV_LOG(ERR, "priv allocation failure"); - err = ENOMEM; - goto port_error; + /* Receive command fd from primary process */ + err = mlx5_socket_connect(eth_dev); + if (err < 0) { + err = rte_errno; + goto error; } - priv->ctx = ctx; - strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path, - sizeof(priv->ibdev_path)); - priv->device_attr = device_attr; - priv->port = port; - priv->pd = pd; - priv->mtu = ETHER_MTU; - err = mlx5_args(&config, pci_dev->device.devargs); + /* Remap UAR for Tx queues. */ + err = mlx5_tx_uar_remap(eth_dev, err); if (err) { - DRV_LOG(ERR, "failed to process device arguments: %s", - strerror(err)); err = rte_errno; - goto port_error; + goto error; + } + /* + * Ethdev pointer is still required as input since + * the primary device is not accessible from the + * secondary process. + */ + eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); + eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); + claim_zero(mlx5_glue->close_device(ctx)); + return eth_dev; + } + /* Check port status. */ + err = mlx5_glue->query_port(ctx, 1, &port_attr); + if (err) { + DRV_LOG(ERR, "port query failed: %s", strerror(err)); + goto error; + } + if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { + DRV_LOG(ERR, "port is not configured in Ethernet mode"); + err = EINVAL; + goto error; + } + if (port_attr.state != IBV_PORT_ACTIVE) + DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", + mlx5_glue->port_state_str(port_attr.state), + port_attr.state); + /* Allocate protection domain. */ + pd = mlx5_glue->alloc_pd(ctx); + if (pd == NULL) { + DRV_LOG(ERR, "PD allocation failure"); + err = ENOMEM; + goto error; + } + priv = rte_zmalloc("ethdev private structure", + sizeof(*priv), + RTE_CACHE_LINE_SIZE); + if (priv == NULL) { + DRV_LOG(ERR, "priv allocation failure"); + err = ENOMEM; + goto error; + } + priv->ctx = ctx; + strncpy(priv->ibdev_name, priv->ctx->device->name, + sizeof(priv->ibdev_name)); + strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path, + sizeof(priv->ibdev_path)); + priv->device_attr = attr; + priv->pd = pd; + priv->mtu = ETHER_MTU; +#ifndef RTE_ARCH_64 + /* Initialize UAR access locks for 32bit implementations. */ + rte_spinlock_init(&priv->uar_lock_cq); + for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) + rte_spinlock_init(&priv->uar_lock[i]); +#endif + /* Some internal functions rely on Netlink sockets, open them now. 
*/ + priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); + priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); + priv->nl_sn = 0; + priv->representor = !!switch_info->representor; + priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; + priv->representor_id = + switch_info->representor ? switch_info->port_name : -1; + /* + * Look for sibling devices in order to reuse their switch domain + * if any, otherwise allocate one. + */ + i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0); + if (i > 0) { + uint16_t port_id[i]; + + i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i); + while (i--) { + const struct priv *opriv = + rte_eth_devices[port_id[i]].data->dev_private; + + if (!opriv || + opriv->domain_id == + RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) + continue; + priv->domain_id = opriv->domain_id; + break; } - err = mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex); + } + if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + err = rte_eth_switch_domain_alloc(&priv->domain_id); if (err) { - DRV_LOG(ERR, "ibv_query_device_ex() failed"); - goto port_error; + err = rte_errno; + DRV_LOG(ERR, "unable to allocate switch domain: %s", + strerror(rte_errno)); + goto error; } - config.hw_csum = !!(device_attr_ex.device_cap_flags_ex & - IBV_DEVICE_RAW_IP_CSUM); - DRV_LOG(DEBUG, "checksum offloading is %ssupported", - (config.hw_csum ? "" : "not ")); + own_domain_id = 1; + } + err = mlx5_args(&config, dpdk_dev->devargs); + if (err) { + err = rte_errno; + DRV_LOG(ERR, "failed to process device arguments: %s", + strerror(rte_errno)); + goto error; + } + config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM); + DRV_LOG(DEBUG, "checksum offloading is %ssupported", + (config.hw_csum ? "" : "not ")); #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - config.flow_counter_en = !!(device_attr.max_counter_sets); - mlx5_glue->describe_counter_set(ctx, 0, &cs_desc); - DRV_LOG(DEBUG, - "counter type = %d, num of cs = %ld, attributes = %d", - cs_desc.counter_type, cs_desc.num_of_cs, - cs_desc.attributes); + config.flow_counter_en = !!attr.max_counter_sets; + mlx5_glue->describe_counter_set(ctx, 0, &cs_desc); + DRV_LOG(DEBUG, "counter type = %d, num of cs = %ld, attributes = %d", + cs_desc.counter_type, cs_desc.num_of_cs, + cs_desc.attributes); #endif - config.ind_table_max_size = - device_attr_ex.rss_caps.max_rwq_indirection_table_size; - /* Remove this check once DPDK supports larger/variable - * indirection tables. */ - if (config.ind_table_max_size > - (unsigned int)ETH_RSS_RETA_SIZE_512) - config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; - DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", - config.ind_table_max_size); - config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps & - IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); - DRV_LOG(DEBUG, "VLAN stripping is %ssupported", - (config.hw_vlan_strip ? "" : "not ")); - - config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps & - IBV_RAW_PACKET_CAP_SCATTER_FCS); - DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", - (config.hw_fcs_strip ? "" : "not ")); - + config.ind_table_max_size = + attr.rss_caps.max_rwq_indirection_table_size; + /* + * Remove this check once DPDK supports larger/variable + * indirection tables. 
+ */ + if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) + config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; + DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", + config.ind_table_max_size); + config.hw_vlan_strip = !!(attr.raw_packet_caps & + IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); + DRV_LOG(DEBUG, "VLAN stripping is %ssupported", + (config.hw_vlan_strip ? "" : "not ")); + config.hw_fcs_strip = !!(attr.raw_packet_caps & + IBV_RAW_PACKET_CAP_SCATTER_FCS); + DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", + (config.hw_fcs_strip ? "" : "not ")); #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING - config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align; + config.hw_padding = !!attr.rx_pad_end_addr_align; #endif - DRV_LOG(DEBUG, - "hardware Rx end alignment padding is %ssupported", - (config.hw_padding ? "" : "not ")); - config.vf = vf; - config.tso = ((device_attr_ex.tso_caps.max_tso > 0) && - (device_attr_ex.tso_caps.supported_qpts & - (1 << IBV_QPT_RAW_PACKET))); - if (config.tso) - config.tso_max_payload_sz = - device_attr_ex.tso_caps.max_tso; - if (config.mps && !mps) { - DRV_LOG(ERR, - "multi-packet send not supported on this device" - " (" MLX5_TXQ_MPW_EN ")"); - err = ENOTSUP; - goto port_error; - } - DRV_LOG(INFO, "%s MPS is %s", - config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", - config.mps != MLX5_MPW_DISABLED ? "enabled" : - "disabled"); - if (config.cqe_comp && !cqe_comp) { - DRV_LOG(WARNING, "Rx CQE compression isn't supported"); - config.cqe_comp = 0; - } - config.mprq.enabled = config.mprq.enabled && mprq; - if (config.mprq.enabled) { - if (config.mprq.stride_num_n > mprq_max_stride_num_n || - config.mprq.stride_num_n < mprq_min_stride_num_n) { - config.mprq.stride_num_n = - RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, - mprq_min_stride_num_n); - DRV_LOG(WARNING, - "the number of strides" - " for Multi-Packet RQ is out of range," - " setting default value (%u)", - 1 << config.mprq.stride_num_n); - } - config.mprq.min_stride_size_n = mprq_min_stride_size_n; - config.mprq.max_stride_size_n = mprq_max_stride_size_n; - } - eth_dev = rte_eth_dev_allocate(name); - if (eth_dev == NULL) { - DRV_LOG(ERR, "can not allocate rte ethdev"); - err = ENOMEM; - goto port_error; - } - eth_dev->data->dev_private = priv; - priv->dev_data = eth_dev->data; - eth_dev->data->mac_addrs = priv->mac; - eth_dev->device = &pci_dev->device; - rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->device->driver = &mlx5_driver.driver; - err = mlx5_uar_init_primary(eth_dev); - if (err) { - err = rte_errno; - goto port_error; - } - /* Configure the first MAC address by default. */ - if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { - DRV_LOG(ERR, - "port %u cannot get MAC address, is mlx5_en" - " loaded? (errno: %s)", - eth_dev->data->port_id, strerror(errno)); - err = ENODEV; - goto port_error; + DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported", + (config.hw_padding ? "" : "not ")); + config.tso = (attr.tso_caps.max_tso > 0 && + (attr.tso_caps.supported_qpts & + (1 << IBV_QPT_RAW_PACKET))); + if (config.tso) + config.tso_max_payload_sz = attr.tso_caps.max_tso; + if (config.mps && !mps) { + DRV_LOG(ERR, + "multi-packet send not supported on this device" + " (" MLX5_TXQ_MPW_EN ")"); + err = ENOTSUP; + goto error; + } + DRV_LOG(INFO, "%sMPS is %s", + config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", + config.mps != MLX5_MPW_DISABLED ? 
"enabled" : "disabled"); + if (config.cqe_comp && !cqe_comp) { + DRV_LOG(WARNING, "Rx CQE compression isn't supported"); + config.cqe_comp = 0; + } + if (config.mprq.enabled && mprq) { + if (config.mprq.stride_num_n > mprq_max_stride_num_n || + config.mprq.stride_num_n < mprq_min_stride_num_n) { + config.mprq.stride_num_n = + RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, + mprq_min_stride_num_n); + DRV_LOG(WARNING, + "the number of strides" + " for Multi-Packet RQ is out of range," + " setting default value (%u)", + 1 << config.mprq.stride_num_n); } - DRV_LOG(INFO, - "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", - eth_dev->data->port_id, - mac.addr_bytes[0], mac.addr_bytes[1], - mac.addr_bytes[2], mac.addr_bytes[3], - mac.addr_bytes[4], mac.addr_bytes[5]); + config.mprq.min_stride_size_n = mprq_min_stride_size_n; + config.mprq.max_stride_size_n = mprq_max_stride_size_n; + } else if (config.mprq.enabled && !mprq) { + DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); + config.mprq.enabled = 0; + } + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + DRV_LOG(ERR, "can not allocate rte ethdev"); + err = ENOMEM; + goto error; + } + if (priv->representor) + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + eth_dev->data->dev_private = priv; + priv->dev_data = eth_dev->data; + eth_dev->data->mac_addrs = priv->mac; + eth_dev->device = dpdk_dev; + eth_dev->device->driver = &mlx5_driver.driver; + err = mlx5_uar_init_primary(eth_dev); + if (err) { + err = rte_errno; + goto error; + } + /* Configure the first MAC address by default. */ + if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { + DRV_LOG(ERR, + "port %u cannot get MAC address, is mlx5_en" + " loaded? (errno: %s)", + eth_dev->data->port_id, strerror(rte_errno)); + err = ENODEV; + goto error; + } + DRV_LOG(INFO, + "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + eth_dev->data->port_id, + mac.addr_bytes[0], mac.addr_bytes[1], + mac.addr_bytes[2], mac.addr_bytes[3], + mac.addr_bytes[4], mac.addr_bytes[5]); #ifndef NDEBUG - { - char ifname[IF_NAMESIZE]; - - if (mlx5_get_ifname(eth_dev, &ifname) == 0) - DRV_LOG(DEBUG, "port %u ifname is \"%s\"", - eth_dev->data->port_id, ifname); - else - DRV_LOG(DEBUG, "port %u ifname is unknown", - eth_dev->data->port_id); - } + { + char ifname[IF_NAMESIZE]; + + if (mlx5_get_ifname(eth_dev, &ifname) == 0) + DRV_LOG(DEBUG, "port %u ifname is \"%s\"", + eth_dev->data->port_id, ifname); + else + DRV_LOG(DEBUG, "port %u ifname is unknown", + eth_dev->data->port_id); + } #endif - /* Get actual MTU if possible. */ - err = mlx5_get_mtu(eth_dev, &priv->mtu); - if (err) { - err = rte_errno; - goto port_error; - } - DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, - priv->mtu); - /* - * Initialize burst functions to prevent crashes before link-up. - */ - eth_dev->rx_pkt_burst = removed_rx_burst; - eth_dev->tx_pkt_burst = removed_tx_burst; - eth_dev->dev_ops = &mlx5_dev_ops; - /* Register MAC address. 
*/ - claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); - priv->nl_socket = -1; - priv->nl_sn = 0; - if (vf && config.vf_nl_en) { - priv->nl_socket = mlx5_nl_init(RTMGRP_LINK); - if (priv->nl_socket < 0) - priv->nl_socket = -1; - mlx5_nl_mac_addr_sync(eth_dev); - } - TAILQ_INIT(&priv->flows); - TAILQ_INIT(&priv->ctrl_flows); - /* Hint libmlx5 to use PMD allocator for data plane resources */ - struct mlx5dv_ctx_allocators alctr = { - .alloc = &mlx5_alloc_verbs_buf, - .free = &mlx5_free_verbs_buf, - .data = priv, - }; - mlx5_glue->dv_set_context_attr(ctx, - MLX5DV_CTX_ATTR_BUF_ALLOCATORS, - (void *)((uintptr_t)&alctr)); - /* Bring Ethernet device up. */ - DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", - eth_dev->data->port_id); - mlx5_set_link_up(eth_dev); - /* - * Even though the interrupt handler is not installed yet, - * interrupts will still trigger on the asyn_fd from - * Verbs context returned by ibv_open_device(). - */ - mlx5_link_update(eth_dev, 0); - /* Store device configuration on private structure. */ - priv->config = config; - /* Create drop queue. */ - err = mlx5_flow_create_drop_queue(eth_dev); - if (err) { - DRV_LOG(ERR, "port %u drop queue allocation failed: %s", - eth_dev->data->port_id, strerror(rte_errno)); - err = rte_errno; - goto port_error; - } - /* Supported Verbs flow priority number detection. */ - if (verb_priorities == 0) - verb_priorities = mlx5_get_max_verbs_prio(eth_dev); - if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) { - DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u", - eth_dev->data->port_id, verb_priorities); - goto port_error; + /* Get actual MTU if possible. */ + err = mlx5_get_mtu(eth_dev, &priv->mtu); + if (err) { + err = rte_errno; + goto error; + } + DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, + priv->mtu); + /* Initialize burst functions to prevent crashes before link-up. */ + eth_dev->rx_pkt_burst = removed_rx_burst; + eth_dev->tx_pkt_burst = removed_tx_burst; + eth_dev->dev_ops = &mlx5_dev_ops; + /* Register MAC address. */ + claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); + if (vf && config.vf_nl_en) + mlx5_nl_mac_addr_sync(eth_dev); + priv->mnl_socket = mlx5_nl_flow_socket_create(); + if (!priv->mnl_socket) { + err = -rte_errno; + DRV_LOG(WARNING, + "flow rules relying on switch offloads will not be" + " supported: cannot open libmnl socket: %s", + strerror(rte_errno)); + } else { + struct rte_flow_error error; + unsigned int ifindex = mlx5_ifindex(eth_dev); + + if (!ifindex) { + err = -rte_errno; + error.message = + "cannot retrieve network interface index"; + } else { + err = mlx5_nl_flow_init(priv->mnl_socket, ifindex, + &error); } - priv->config.max_verbs_prio = verb_priorities; - /* - * Once the device is added to the list of memory event - * callback, its global MR cache table cannot be expanded - * on the fly because of deadlock. If it overflows, lookup - * should be done by searching MR list linearly, which is slow. - */ - err = mlx5_mr_btree_init(&priv->mr.cache, - MLX5_MR_BTREE_CACHE_N * 2, - eth_dev->device->numa_node); if (err) { - err = rte_errno; - goto port_error; + DRV_LOG(WARNING, + "flow rules relying on switch offloads will" + " not be supported: %s: %s", + error.message, strerror(rte_errno)); + mlx5_nl_flow_socket_destroy(priv->mnl_socket); + priv->mnl_socket = NULL; } - /* Add device to memory callback list. 
*/ - rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); - LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, - priv, mem_event_cb); - rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); - rte_eth_dev_probing_finish(eth_dev); - continue; -port_error: - if (priv) - rte_free(priv); - if (pd) - claim_zero(mlx5_glue->dealloc_pd(pd)); - if (ctx) - claim_zero(mlx5_glue->close_device(ctx)); - if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_eth_dev_release_port(eth_dev); - break; } + TAILQ_INIT(&priv->flows); + TAILQ_INIT(&priv->ctrl_flows); + /* Hint libmlx5 to use PMD allocator for data plane resources */ + struct mlx5dv_ctx_allocators alctr = { + .alloc = &mlx5_alloc_verbs_buf, + .free = &mlx5_free_verbs_buf, + .data = priv, + }; + mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, + (void *)((uintptr_t)&alctr)); + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", + eth_dev->data->port_id); + mlx5_set_link_up(eth_dev); /* - * XXX if something went wrong in the loop above, there is a resource - * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as - * long as the dpdk does not provide a way to deallocate a ethdev and a - * way to enumerate the registered ethdevs to free the previous ones. + * Even though the interrupt handler is not installed yet, + * interrupts will still trigger on the asyn_fd from + * Verbs context returned by ibv_open_device(). */ - /* no port found, complain */ - if (!mlx5_dev[idx].ports) { - rte_errno = ENODEV; + mlx5_link_update(eth_dev, 0); + /* Store device configuration on private structure. */ + priv->config = config; + /* Supported Verbs flow priority number detection. */ + err = mlx5_flow_discover_priorities(eth_dev); + if (err < 0) + goto error; + priv->config.flow_prio = err; + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + */ + err = mlx5_mr_btree_init(&priv->mr.cache, + MLX5_MR_BTREE_CACHE_N * 2, + eth_dev->device->numa_node); + if (err) { err = rte_errno; + goto error; } + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, + priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + return eth_dev; error: - if (attr_ctx) - claim_zero(mlx5_glue->close_device(attr_ctx)); - if (list) - mlx5_glue->free_device_list(list); - if (err) { - rte_errno = err; + if (priv) { + if (priv->nl_socket_route >= 0) + close(priv->nl_socket_route); + if (priv->nl_socket_rdma >= 0) + close(priv->nl_socket_rdma); + if (priv->mnl_socket) + mlx5_nl_flow_socket_destroy(priv->mnl_socket); + if (own_domain_id) + claim_zero(rte_eth_switch_domain_free(priv->domain_id)); + rte_free(priv); + } + if (pd) + claim_zero(mlx5_glue->dealloc_pd(pd)); + if (eth_dev) + rte_eth_dev_release_port(eth_dev); + if (ctx) + claim_zero(mlx5_glue->close_device(ctx)); + assert(err > 0); + rte_errno = err; + return NULL; +} + +/** Data associated with devices to spawn. */ +struct mlx5_dev_spawn_data { + unsigned int ifindex; /**< Network interface index. */ + struct mlx5_switch_info info; /**< Switch information. */ + struct ibv_device *ibv_dev; /**< Associated IB device. */ + struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. 
*/ +}; + +/** + * Comparison callback to sort device data. + * + * This is meant to be used with qsort(). + * + * @param a[in] + * Pointer to pointer to first data object. + * @param b[in] + * Pointer to pointer to second data object. + * + * @return + * 0 if both objects are equal, less than 0 if the first argument is less + * than the second, greater than 0 otherwise. + */ +static int +mlx5_dev_spawn_data_cmp(const void *a, const void *b) +{ + const struct mlx5_switch_info *si_a = + &((const struct mlx5_dev_spawn_data *)a)->info; + const struct mlx5_switch_info *si_b = + &((const struct mlx5_dev_spawn_data *)b)->info; + int ret; + + /* Master device first. */ + ret = si_b->master - si_a->master; + if (ret) + return ret; + /* Then representor devices. */ + ret = si_b->representor - si_a->representor; + if (ret) + return ret; + /* Unidentified devices come last in no specific order. */ + if (!si_a->representor) + return 0; + /* Order representors by name. */ + return si_a->port_name - si_b->port_name; +} + +/** + * DPDK callback to register a PCI device. + * + * This function spawns Ethernet devices out of a given PCI device. + * + * @param[in] pci_drv + * PCI driver structure (mlx5_driver). + * @param[in] pci_dev + * PCI device information. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct ibv_device **ibv_list; + unsigned int n = 0; + int vf; + int ret; + + assert(pci_drv == &mlx5_driver); + errno = 0; + ibv_list = mlx5_glue->get_device_list(&ret); + if (!ibv_list) { + rte_errno = errno ? errno : ENOSYS; + DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); return -rte_errno; } - return 0; + + struct ibv_device *ibv_match[ret + 1]; + + while (ret-- > 0) { + struct rte_pci_addr pci_addr; + + DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); + if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr)) + continue; + if (pci_dev->addr.domain != pci_addr.domain || + pci_dev->addr.bus != pci_addr.bus || + pci_dev->addr.devid != pci_addr.devid || + pci_dev->addr.function != pci_addr.function) + continue; + DRV_LOG(INFO, "PCI information matches for device \"%s\"", + ibv_list[ret]->name); + ibv_match[n++] = ibv_list[ret]; + } + ibv_match[n] = NULL; + + struct mlx5_dev_spawn_data list[n]; + int nl_route = n ? mlx5_nl_init(NETLINK_ROUTE) : -1; + int nl_rdma = n ? mlx5_nl_init(NETLINK_RDMA) : -1; + unsigned int i; + unsigned int u; + + /* + * The existence of several matching entries (n > 1) means port + * representors have been instantiated. No existing Verbs call nor + * /sys entries can tell them apart, this can only be done through + * Netlink calls assuming kernel drivers are recent enough to + * support them. + * + * In the event of identification failure through Netlink, try again + * through sysfs, then either: + * + * 1. No device matches (n == 0), complain and bail out. + * 2. A single IB device matches (n == 1) and is not a representor, + * assume no switch support. + * 3. Otherwise no safe assumptions can be made; complain louder and + * bail out. 
+ */ + for (i = 0; i != n; ++i) { + list[i].ibv_dev = ibv_match[i]; + list[i].eth_dev = NULL; + if (nl_rdma < 0) + list[i].ifindex = 0; + else + list[i].ifindex = mlx5_nl_ifindex + (nl_rdma, list[i].ibv_dev->name); + if (nl_route < 0 || + !list[i].ifindex || + mlx5_nl_switch_info(nl_route, list[i].ifindex, + &list[i].info) || + ((!list[i].info.representor && !list[i].info.master) && + mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) { + list[i].ifindex = 0; + memset(&list[i].info, 0, sizeof(list[i].info)); + continue; + } + } + if (nl_rdma >= 0) + close(nl_rdma); + if (nl_route >= 0) + close(nl_route); + /* Count unidentified devices. */ + for (u = 0, i = 0; i != n; ++i) + if (!list[i].info.master && !list[i].info.representor) + ++u; + if (u) { + if (n == 1 && u == 1) { + /* Case #2. */ + DRV_LOG(INFO, "no switch support detected"); + } else { + /* Case #3. */ + DRV_LOG(ERR, + "unable to tell which of the matching devices" + " is the master (lack of kernel support?)"); + n = 0; + } + } + /* + * Sort list to probe devices in natural order for users convenience + * (i.e. master first, then representors from lowest to highest ID). + */ + if (n) + qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp); + switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: + vf = 1; + break; + default: + vf = 0; + } + for (i = 0; i != n; ++i) { + uint32_t restore; + + list[i].eth_dev = mlx5_dev_spawn + (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info); + if (!list[i].eth_dev) { + if (rte_errno != EBUSY) + break; + /* Device is disabled, ignore it. */ + continue; + } + restore = list[i].eth_dev->data->dev_flags; + rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); + /* Restore non-PCI flags cleared by the above call. */ + list[i].eth_dev->data->dev_flags |= restore; + rte_eth_dev_probing_finish(list[i].eth_dev); + } + mlx5_glue->free_device_list(ibv_list); + if (!n) { + DRV_LOG(WARNING, + "no Verbs device matches PCI device " PCI_PRI_FMT "," + " are kernel drivers loaded?", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + rte_errno = ENOENT; + ret = -rte_errno; + } else if (i != n) { + DRV_LOG(ERR, + "probe of PCI device " PCI_PRI_FMT " aborted after" + " encountering an error: %s", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function, + strerror(rte_errno)); + ret = -rte_errno; + /* Roll back. */ + while (i--) { + if (!list[i].eth_dev) + continue; + mlx5_dev_close(list[i].eth_dev); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_free(list[i].eth_dev->data->dev_private); + claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); + } + /* Restore original error. */ + rte_errno = -ret; + } else { + ret = 0; + } + return ret; } static const struct rte_pci_id mlx5_pci_id_map[] = { @@ -1435,10 +1635,13 @@ glue_error: /** * Driver initialization routine. */ -RTE_INIT(rte_mlx5_pmd_init); -static void -rte_mlx5_pmd_init(void) +RTE_INIT(rte_mlx5_pmd_init) { + /* Initialize driver log type. */ + mlx5_logtype = rte_log_register("pmd.net.mlx5"); + if (mlx5_logtype >= 0) + rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); + /* Build the static tables for Verbs conversion. */ mlx5_set_ptype_table(); mlx5_set_cksum_table(); @@ -1453,6 +1656,11 @@ rte_mlx5_pmd_init(void) /* Match the size of Rx completion entry to the size of a cacheline. 
*/ if (RTE_CACHE_LINE_SIZE == 128) setenv("MLX5_CQE_SIZE", "128", 0); + /* + * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to + * cleanup all the Verbs resources even when the device was removed. + */ + setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1); #ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS if (mlx5_glue_init()) return; @@ -1480,11 +1688,3 @@ rte_mlx5_pmd_init(void) RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); - -/** Initialize driver log type. */ -RTE_INIT(vdev_netvsc_init_log) -{ - mlx5_logtype = rte_log_register("pmd.net.mlx5"); - if (mlx5_logtype >= 0) - rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); -} diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 997b04a3..a3a34cff 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -53,6 +53,14 @@ enum { PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2, }; +/** Switch information returned by mlx5_nl_switch_info(). */ +struct mlx5_switch_info { + uint32_t master:1; /**< Master device. */ + uint32_t representor:1; /**< Representor device. */ + int32_t port_name; /**< Representor port name. */ + uint64_t switch_id; /**< Switch identifier. */ +}; + LIST_HEAD(mlx5_dev_list, priv); /* Shared memory between primary and secondary processes. */ @@ -114,7 +122,7 @@ struct mlx5_dev_config { unsigned int min_rxqs_num; /* Rx queue count threshold to enable MPRQ. */ } mprq; /* Configurations for Multi-Packet RQ. */ - unsigned int max_verbs_prio; /* Number of Verb flow priorities. */ + unsigned int flow_prio; /* Number of flow priorities. */ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */ unsigned int ind_table_max_size; /* Maximum indirection table size. */ int txq_inline; /* Maximum packet size for inlining. */ @@ -131,9 +139,6 @@ enum mlx5_verbs_alloc_type { MLX5_VERBS_ALLOC_TYPE_RX_QUEUE, }; -/* 8 Verbs priorities. */ -#define MLX5_VERBS_FLOW_PRIO_8 8 - /** * Verbs allocator needs a context to know in the callback which kind of * resources it is allocating. @@ -145,12 +150,27 @@ struct mlx5_verbs_alloc_ctx { LIST_HEAD(mlx5_mr_list, mlx5_mr); +/* Flow drop context necessary due to Verbs API. */ +struct mlx5_drop { + struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */ + struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */ +}; + +/** DPDK port to network interface index (ifindex) conversion. */ +struct mlx5_nl_flow_ptoi { + uint16_t port_id; /**< DPDK port ID. */ + unsigned int ifindex; /**< Network interface index. */ +}; + +struct mnl_socket; + struct priv { LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */ struct rte_eth_dev_data *dev_data; /* Pointer to device data. */ struct ibv_context *ctx; /* Verbs context. */ struct ibv_device_attr_ex device_attr; /* Device properties. */ struct ibv_pd *pd; /* Protection Domain. */ + char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */ char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */ struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */ BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES); @@ -159,8 +179,10 @@ struct priv { unsigned int vlan_filter_n; /* Number of configured VLAN filters. */ /* Device properties. */ uint16_t mtu; /* Configured MTU. */ - uint8_t port; /* Physical port number. */ unsigned int isolated:1; /* Whether isolated mode is enabled. */ + unsigned int representor:1; /* Device is a port representor. */ + uint16_t domain_id; /* Switch domain identifier. 
*/ + int32_t representor_id; /* Port representor identifier. */ /* RX/TX queues. */ unsigned int rxqs_n; /* RX queues array size. */ unsigned int txqs_n; /* TX queues array size. */ @@ -171,9 +193,11 @@ struct priv { struct rte_intr_handle intr_handle; /* Interrupt handler. */ unsigned int (*reta_idx)[]; /* RETA index table. */ unsigned int reta_idx_n; /* RETA index size. */ - struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */ + struct mlx5_drop drop_queue; /* Flow drop queues. */ struct mlx5_flows flows; /* RTE Flow rules. */ struct mlx5_flows ctrl_flows; /* Control flow rules. */ + LIST_HEAD(counters, mlx5_flow_counter) flow_counters; + /* Flow counters. */ struct { uint32_t dev_gen; /* Generation number to flush local caches. */ rte_rwlock_t rwlock; /* MR Lock. */ @@ -196,8 +220,15 @@ struct priv { struct mlx5_dev_config config; /* Device configuration. */ struct mlx5_verbs_alloc_ctx verbs_alloc_ctx; /* Context for Verbs allocator. */ - int nl_socket; /* Netlink socket. */ + int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */ + int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ uint32_t nl_sn; /* Netlink message sequence number. */ +#ifndef RTE_ARCH_64 + rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */ + rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX]; + /* UAR same-page access control required in 32bit implementations. */ +#endif + struct mnl_socket *mnl_socket; /* Libmnl socket. */ }; #define PORT_ID(priv) ((priv)->dev_data->port_id) @@ -209,9 +240,12 @@ int mlx5_getenv_int(const char *); /* mlx5_ethdev.c */ +int mlx5_get_master_ifname(const struct rte_eth_dev *dev, + char (*ifname)[IF_NAMESIZE]); int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]); -int mlx5_ifindex(const struct rte_eth_dev *dev); -int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr); +unsigned int mlx5_ifindex(const struct rte_eth_dev *dev); +int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr, + int master); int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu); int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags); @@ -236,6 +270,11 @@ int mlx5_set_link_up(struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev); eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev); +unsigned int mlx5_dev_to_port_id(const struct rte_device *dev, + uint16_t *port_list, + unsigned int port_list_n); +int mlx5_sysfs_switch_info(unsigned int ifindex, + struct mlx5_switch_info *info); /* mlx5_mac.c */ @@ -296,7 +335,8 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ -unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev); +int mlx5_flow_discover_priorities(struct rte_eth_dev *dev); +void mlx5_flow_print(struct rte_flow *flow); int mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], @@ -343,7 +383,7 @@ int mlx5_socket_connect(struct rte_eth_dev *priv); /* mlx5_nl.c */ -int mlx5_nl_init(uint32_t nlgroups); +int mlx5_nl_init(int protocol); int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, uint32_t index); int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac, @@ -352,5 +392,27 @@ void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev); void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev); int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable); int mlx5_nl_allmulti(struct 
rte_eth_dev *dev, int enable); +unsigned int mlx5_nl_ifindex(int nl, const char *name); +int mlx5_nl_switch_info(int nl, unsigned int ifindex, + struct mlx5_switch_info *info); + +/* mlx5_nl_flow.c */ + +int mlx5_nl_flow_transpose(void *buf, + size_t size, + const struct mlx5_nl_flow_ptoi *ptoi, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error); +void mlx5_nl_flow_brand(void *buf, uint32_t handle); +int mlx5_nl_flow_create(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error); +int mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error); +int mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex, + struct rte_flow_error *error); +struct mnl_socket *mlx5_nl_flow_socket_create(void); +void mlx5_nl_flow_socket_destroy(struct mnl_socket *nl); #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h index 51124cdc..f2a16795 100644 --- a/drivers/net/mlx5/mlx5_defs.h +++ b/drivers/net/mlx5/mlx5_defs.h @@ -64,10 +64,11 @@ #define MLX5_VPMD_MIN_TXQS 4 /* Threshold of buffer replenishment for vectorized Rx. */ -#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U +#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \ + (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2)) /* Maximum size of burst for vectorized Rx. */ -#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH +#define MLX5_VPMD_RX_MAX_BURST 64U /* * Maximum size of burst for vectorized Tx. This is related to the maximum size @@ -86,17 +87,31 @@ #define MLX5_LINK_STATUS_TIMEOUT 10 /* Reserved address space for UAR mapping. */ -#define MLX5_UAR_SIZE (1ULL << 32) +#define MLX5_UAR_SIZE (1ULL << (sizeof(uintptr_t) * 4)) /* Offset of reserved UAR address space to hugepage memory. Offset is used here * to minimize possibility of address next to hugepage being used by other code * in either primary or secondary process, failing to map TX UAR would make TX * packets invisible to HW. */ -#define MLX5_UAR_OFFSET (1ULL << 32) +#define MLX5_UAR_OFFSET (1ULL << (sizeof(uintptr_t) * 4)) + +/* Maximum number of UAR pages used by a port, + * These are the size and mask for an array of mutexes used to synchronize + * the access to port's UARs on platforms that do not support 64 bit writes. + * In such systems it is possible to issue the 64 bits DoorBells through two + * consecutive writes, each write 32 bits. The access to a UAR page (which can + * be accessible by all threads in the process) must be synchronized + * (for example, using a semaphore). Such a synchronization is not required + * when ringing DoorBells on different UAR pages. + * A port with 512 Tx queues uses 8, 4kBytes, UAR pages which are shared + * among the ports. + */ +#define MLX5_UAR_PAGE_NUM_MAX 64 +#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1) /* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */ -#define MLX5_MPRQ_STRIDE_NUM_N 4U +#define MLX5_MPRQ_STRIDE_NUM_N 6U /* Two-byte shift is disabled for Multi-Packet RQ. */ #define MLX5_MPRQ_TWO_BYTE_SHIFT 0 @@ -111,6 +126,11 @@ #define MLX5_MPRQ_MIN_RXQS 12 /* Cache size of mempool for Multi-Packet RQ. 
*/ -#define MLX5_MPRQ_MP_CACHE_SZ 32 +#define MLX5_MPRQ_MP_CACHE_SZ 32U + +/* Definition of static_assert found in /usr/include/assert.h */ +#ifndef HAVE_STATIC_ASSERT +#define static_assert _Static_assert +#endif #endif /* RTE_PMD_MLX5_DEFS_H_ */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 90488af3..34c5b95e 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -41,6 +41,32 @@ #include "mlx5_rxtx.h" #include "mlx5_utils.h" +/* Supported speed values found in /usr/include/linux/ethtool.h */ +#ifndef HAVE_SUPPORTED_40000baseKR4_Full +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#endif +#ifndef HAVE_SUPPORTED_40000baseCR4_Full +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#endif +#ifndef HAVE_SUPPORTED_40000baseSR4_Full +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#endif +#ifndef HAVE_SUPPORTED_40000baseLR4_Full +#define SUPPORTED_40000baseLR4_Full (1 << 26) +#endif +#ifndef HAVE_SUPPORTED_56000baseKR4_Full +#define SUPPORTED_56000baseKR4_Full (1 << 27) +#endif +#ifndef HAVE_SUPPORTED_56000baseCR4_Full +#define SUPPORTED_56000baseCR4_Full (1 << 28) +#endif +#ifndef HAVE_SUPPORTED_56000baseSR4_Full +#define SUPPORTED_56000baseSR4_Full (1 << 29) +#endif +#ifndef HAVE_SUPPORTED_56000baseLR4_Full +#define SUPPORTED_56000baseLR4_Full (1 << 30) +#endif + /* Add defines in case the running kernel is not the same as user headers. */ #ifndef ETHTOOL_GLINKSETTINGS struct ethtool_link_settings { @@ -93,7 +119,7 @@ struct ethtool_link_settings { #endif /** - * Get interface name from private structure. + * Get master interface name from private structure. * * @param[in] dev * Pointer to Ethernet device. @@ -104,7 +130,8 @@ struct ethtool_link_settings { * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) +mlx5_get_master_ifname(const struct rte_eth_dev *dev, + char (*ifname)[IF_NAMESIZE]) { struct priv *priv = dev->data->dev_private; DIR *dir; @@ -166,7 +193,7 @@ try_dev_id: if (dev_port == dev_port_prev) goto try_dev_id; dev_port_prev = dev_port; - if (dev_port == (priv->port - 1u)) + if (dev_port == 0) strlcpy(match, name, sizeof(match)); } closedir(dir); @@ -179,30 +206,59 @@ try_dev_id: } /** - * Get the interface index from device name. + * Get interface name from private structure. + * + * This is a port representor-aware version of mlx5_get_master_ifname(). * * @param[in] dev * Pointer to Ethernet device. + * @param[out] ifname + * Interface name output buffer. * * @return - * Interface index on success, a negative errno value otherwise and - * rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int +mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) +{ + struct priv *priv = dev->data->dev_private; + unsigned int ifindex = + priv->nl_socket_rdma >= 0 ? + mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0; + + if (!ifindex) { + if (!priv->representor) + return mlx5_get_master_ifname(dev, ifname); + rte_errno = ENXIO; + return -rte_errno; + } + if (if_indextoname(ifindex, &(*ifname)[0])) + return 0; + rte_errno = errno; + return -rte_errno; +} + +/** + * Get the interface index from device name. + * + * @param[in] dev + * Pointer to Ethernet device. + * + * @return + * Nonzero interface index on success, zero otherwise and rte_errno is set. 
+ */ +unsigned int mlx5_ifindex(const struct rte_eth_dev *dev) { char ifname[IF_NAMESIZE]; - int ret; + unsigned int ifindex; - ret = mlx5_get_ifname(dev, &ifname); - if (ret) - return ret; - ret = if_nametoindex(ifname); - if (ret == -1) { + if (mlx5_get_ifname(dev, &ifname)) + return 0; + ifindex = if_nametoindex(ifname); + if (!ifindex) rte_errno = errno; - return -rte_errno; - } - return ret; + return ifindex; } /** @@ -214,12 +270,16 @@ mlx5_ifindex(const struct rte_eth_dev *dev) * Request number to pass to ioctl(). * @param[out] ifr * Interface request structure output buffer. + * @param master + * When device is a port representor, perform request on master device + * instead. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) +mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr, + int master) { int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); int ret = 0; @@ -228,7 +288,10 @@ mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) rte_errno = errno; return -rte_errno; } - ret = mlx5_get_ifname(dev, &ifr->ifr_name); + if (master) + ret = mlx5_get_master_ifname(dev, &ifr->ifr_name); + else + ret = mlx5_get_ifname(dev, &ifr->ifr_name); if (ret) goto error; ret = ioctl(sock, req, ifr); @@ -258,7 +321,7 @@ int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) { struct ifreq request; - int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request); + int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request, 0); if (ret) return ret; @@ -282,7 +345,7 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct ifreq request = { .ifr_mtu = mtu, }; - return mlx5_ifreq(dev, SIOCSIFMTU, &request); + return mlx5_ifreq(dev, SIOCSIFMTU, &request, 0); } /** @@ -302,13 +365,13 @@ int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) { struct ifreq request; - int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request); + int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request, 0); if (ret) return ret; request.ifr_flags &= keep; request.ifr_flags |= flags & ~keep; - return mlx5_ifreq(dev, SIOCSIFFLAGS, &request); + return mlx5_ifreq(dev, SIOCSIFFLAGS, &request, 0); } /** @@ -335,15 +398,15 @@ mlx5_dev_configure(struct rte_eth_dev *dev) if (use_app_rss_key && (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != - rss_hash_default_key_len)) { - DRV_LOG(ERR, "port %u RSS key len must be %zu Bytes long", - dev->data->port_id, rss_hash_default_key_len); + MLX5_RSS_HASH_KEY_LEN)) { + DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long", + dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN)); rte_errno = EINVAL; return -rte_errno; } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, - rss_hash_default_key_len, 0); + MLX5_RSS_HASH_KEY_LEN, 0); if (!priv->rss_conf.rss_key) { DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)", dev->data->port_id, rxqs_n); @@ -354,8 +417,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev) use_app_rss_key ? 
dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key : rss_hash_default_key, - rss_hash_default_key_len); - priv->rss_conf.rss_key_len = rss_hash_default_key_len; + MLX5_RSS_HASH_KEY_LEN); + priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN; priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; priv->rxqs = (void *)dev->data->rx_queues; priv->txqs = (void *)dev->data->tx_queues; @@ -473,10 +536,34 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->if_index = if_nametoindex(ifname); info->reta_size = priv->reta_idx_n ? priv->reta_idx_n : config->ind_table_max_size; - info->hash_key_size = rss_hash_default_key_len; + info->hash_key_size = MLX5_RSS_HASH_KEY_LEN; info->speed_capa = priv->link_speed_capa; info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; mlx5_set_default_params(dev, info); + info->switch_info.name = dev->data->name; + info->switch_info.domain_id = priv->domain_id; + info->switch_info.port_id = priv->representor_id; + if (priv->representor) { + unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0); + uint16_t port_id[i]; + + i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i); + while (i--) { + struct priv *opriv = + rte_eth_devices[port_id[i]].data->dev_private; + + if (!opriv || + opriv->representor || + opriv->domain_id != priv->domain_id) + continue; + /* + * Override switch name with that of the master + * device. + */ + info->switch_info.name = opriv->dev_data->name; + break; + } + } } /** @@ -540,7 +627,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int link_speed = 0; int ret; - ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1); if (ret) { DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", dev->data->port_id, strerror(rte_errno)); @@ -550,7 +637,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&edata; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); if (ret) { DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", @@ -611,7 +698,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, uint64_t sc; int ret; - ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1); if (ret) { DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", dev->data->port_id, strerror(rte_errno)); @@ -621,7 +708,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&gcmd; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); if (ret) { DRV_LOG(DEBUG, "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" @@ -638,7 +725,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, *ecmd = gcmd; ifr.ifr_data = (void *)ecmd; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); if (ret) { DRV_LOG(DEBUG, "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" @@ -801,7 +888,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); if (ret) { DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" @@ -854,7 +941,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; 
else ethpause.tx_pause = 0; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 0); if (ret) { DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" @@ -1193,3 +1280,93 @@ mlx5_is_removed(struct rte_eth_dev *dev) return 1; return 0; } + +/** + * Get port ID list of mlx5 instances sharing a common device. + * + * @param[in] dev + * Device to look for. + * @param[out] port_list + * Result buffer for collected port IDs. + * @param port_list_n + * Maximum number of entries in result buffer. If 0, @p port_list can be + * NULL. + * + * @return + * Number of matching instances regardless of the @p port_list_n + * parameter, 0 if none were found. + */ +unsigned int +mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list, + unsigned int port_list_n) +{ + uint16_t id; + unsigned int n = 0; + + RTE_ETH_FOREACH_DEV(id) { + struct rte_eth_dev *ldev = &rte_eth_devices[id]; + + if (!ldev->device || + !ldev->device->driver || + strcmp(ldev->device->driver->name, MLX5_DRIVER_NAME) || + ldev->device != dev) + continue; + if (n < port_list_n) + port_list[n] = id; + n++; + } + return n; +} + +/** + * Get switch information associated with network interface. + * + * @param ifindex + * Network interface index. + * @param[out] info + * Switch information object, populated in case of success. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) +{ + char ifname[IF_NAMESIZE]; + FILE *file; + struct mlx5_switch_info data = { .master = 0, }; + bool port_name_set = false; + bool port_switch_id_set = false; + char c; + + if (!if_indextoname(ifindex, ifname)) { + rte_errno = errno; + return -rte_errno; + } + + MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name", + ifname); + MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id", + ifname); + + file = fopen(phys_port_name, "rb"); + if (file != NULL) { + port_name_set = + fscanf(file, "%d%c", &data.port_name, &c) == 2 && + c == '\n'; + fclose(file); + } + file = fopen(phys_switch_id, "rb"); + if (file == NULL) { + rte_errno = errno; + return -rte_errno; + } + port_switch_id_set = + fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 && + c == '\n'; + fclose(file); + data.master = port_switch_id_set && !port_name_set; + data.representor = port_switch_id_set && port_name_set; + *info = data; + return 0; +} diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 994be05b..ca4625b6 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -4,6 +4,7 @@ */ #include <sys/queue.h> +#include <stdalign.h> #include <stdint.h> #include <string.h> @@ -31,497 +32,289 @@ #include "mlx5_prm.h" #include "mlx5_glue.h" -/* Flow priority for control plane flows. */ -#define MLX5_CTRL_FLOW_PRIORITY 1 - -/* Internet Protocol versions. */ -#define MLX5_IPV4 4 -#define MLX5_IPV6 6 -#define MLX5_GRE 47 - -#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT -struct ibv_flow_spec_counter_action { - int dummy; -}; -#endif - /* Dev ops structure defined in mlx5.c */ extern const struct eth_dev_ops mlx5_dev_ops; extern const struct eth_dev_ops mlx5_dev_ops_isolate; -/** Structure give to the conversion functions. */ -struct mlx5_flow_data { - struct rte_eth_dev *dev; /** Ethernet device. */ - struct mlx5_flow_parse *parser; /** Parser context. */ - struct rte_flow_error *error; /** Error context. 
*/ -}; - -static int -mlx5_flow_create_eth(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_vlan(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_ipv4(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_ipv6(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_udp(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_tcp(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_vxlan(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_gre(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -static int -mlx5_flow_create_mpls(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - -struct mlx5_flow_parse; - -static void -mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, - unsigned int size); - -static int -mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id); - -static int -mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser); - -/* Hash RX queue types. */ -enum hash_rxq_type { - HASH_RXQ_TCPV4, - HASH_RXQ_UDPV4, - HASH_RXQ_IPV4, - HASH_RXQ_TCPV6, - HASH_RXQ_UDPV6, - HASH_RXQ_IPV6, - HASH_RXQ_ETH, - HASH_RXQ_TUNNEL, +/* Pattern outer Layer bits. */ +#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2) +#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3) +#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4) +#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5) + +/* Pattern inner Layer bits. */ +#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6) +#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7) +#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8) +#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9) +#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10) +#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11) + +/* Pattern tunnel Layer bits. */ +#define MLX5_FLOW_LAYER_VXLAN (1u << 12) +#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13) +#define MLX5_FLOW_LAYER_GRE (1u << 14) +#define MLX5_FLOW_LAYER_MPLS (1u << 15) + +/* Outer Masks. */ +#define MLX5_FLOW_LAYER_OUTER_L3 \ + (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6) +#define MLX5_FLOW_LAYER_OUTER_L4 \ + (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP) +#define MLX5_FLOW_LAYER_OUTER \ + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \ + MLX5_FLOW_LAYER_OUTER_L4) + +/* Tunnel Masks. */ +#define MLX5_FLOW_LAYER_TUNNEL \ + (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ + MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS) + +/* Inner Masks. 
*/ +#define MLX5_FLOW_LAYER_INNER_L3 \ + (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6) +#define MLX5_FLOW_LAYER_INNER_L4 \ + (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP) +#define MLX5_FLOW_LAYER_INNER \ + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \ + MLX5_FLOW_LAYER_INNER_L4) + +/* Actions that modify the fate of matching traffic. */ +#define MLX5_FLOW_FATE_DROP (1u << 0) +#define MLX5_FLOW_FATE_QUEUE (1u << 1) +#define MLX5_FLOW_FATE_RSS (1u << 2) + +/* Modify a packet. */ +#define MLX5_FLOW_MOD_FLAG (1u << 0) +#define MLX5_FLOW_MOD_MARK (1u << 1) +#define MLX5_FLOW_MOD_COUNT (1u << 2) + +/* possible L3 layers protocols filtering. */ +#define MLX5_IP_PROTOCOL_TCP 6 +#define MLX5_IP_PROTOCOL_UDP 17 +#define MLX5_IP_PROTOCOL_GRE 47 +#define MLX5_IP_PROTOCOL_MPLS 147 + +/* Priority reserved for default flows. */ +#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1) + +enum mlx5_expansion { + MLX5_EXPANSION_ROOT, + MLX5_EXPANSION_ROOT_OUTER, + MLX5_EXPANSION_ROOT_ETH_VLAN, + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_VLAN, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP, + MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_ETH, + MLX5_EXPANSION_ETH_VLAN, + MLX5_EXPANSION_VLAN, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP, + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP, }; -/* Initialization data for hash RX queue. */ -struct hash_rxq_init { - uint64_t hash_fields; /* Fields that participate in the hash. */ - uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */ - unsigned int flow_priority; /* Flow priority to use. */ - unsigned int ip_version; /* Internet protocol. */ -}; - -/* Initialization data for hash RX queues. */ -const struct hash_rxq_init hash_rxq_init[] = { - [HASH_RXQ_TCPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP, - .flow_priority = 0, - .ip_version = MLX5_IPV4, +/** Supported expansion of items. 
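+ * Used by RSS flow expansion: per this table, an ETH pattern, for example, may be expanded into additional IPv4/IPv6 (and further UDP/TCP) flows according to the RSS types requested by the action.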
*/ +static const struct rte_flow_expand_node mlx5_support_expansion[] = { + [MLX5_EXPANSION_ROOT] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_UDPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP, - .flow_priority = 0, - .ip_version = MLX5_IPV4, + [MLX5_EXPANSION_ROOT_OUTER] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_IPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4), - .dpdk_rss_hf = (ETH_RSS_IPV4 | - ETH_RSS_FRAG_IPV4), - .flow_priority = 1, - .ip_version = MLX5_IPV4, + [MLX5_EXPANSION_ROOT_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_TCPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP, - .flow_priority = 0, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, }, - [HASH_RXQ_UDPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP, - .flow_priority = 0, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_OUTER_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_MPLS), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, }, - [HASH_RXQ_IPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6), - .dpdk_rss_hf = (ETH_RSS_IPV6 | - ETH_RSS_FRAG_IPV6), - .flow_priority = 1, - .ip_version = MLX5_IPV6, + [MLX5_EXPANSION_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, }, - [HASH_RXQ_ETH] = { - .hash_fields = 0, - .dpdk_rss_hf = 0, - .flow_priority = 2, + [MLX5_EXPANSION_OUTER_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, }, -}; - -/* Number of entries in hash_rxq_init[]. */ -const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init); - -/** Structure for holding counter stats. */ -struct mlx5_flow_counter_stats { - uint64_t hits; /**< Number of packets matched by the rule. */ - uint64_t bytes; /**< Number of bytes matched by the rule. */ -}; - -/** Structure for Drop queue. */ -struct mlx5_hrxq_drop { - struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */ - struct ibv_qp *qp; /**< Verbs queue pair. */ - struct ibv_wq *wq; /**< Verbs work queue. */ - struct ibv_cq *cq; /**< Verbs completion queue. */ -}; - -/* Flows structures. */ -struct mlx5_flow { - uint64_t hash_fields; /**< Fields that participate in the hash. */ - struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ - struct ibv_flow *ibv_flow; /**< Verbs flow. */ - struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */ -}; - -/* Drop flows structures. */ -struct mlx5_flow_drop { - struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ - struct ibv_flow *ibv_flow; /**< Verbs flow. 
*/ -}; - -struct rte_flow { - TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ - uint32_t mark:1; /**< Set if the flow is marked. */ - uint32_t drop:1; /**< Drop queue. */ - struct rte_flow_action_rss rss_conf; /**< RSS configuration */ - uint16_t (*queues)[]; /**< Queues indexes to use. */ - uint8_t rss_key[40]; /**< copy of the RSS key. */ - uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */ - struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ - struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */ - struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)]; - /**< Flow with Rx queue. */ -}; - -/** Static initializer for items. */ -#define ITEMS(...) \ - (const enum rte_flow_item_type []){ \ - __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \ - } - -#define IS_TUNNEL(type) ( \ - (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \ - (type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \ - (type) == RTE_FLOW_ITEM_TYPE_GRE || \ - (type) == RTE_FLOW_ITEM_TYPE_MPLS) - -const uint32_t flow_ptype[] = { - [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, - [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE, - [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE, - [RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, -}; - -#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12) - -const uint32_t ptype_ext[] = { - [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN | - RTE_PTYPE_L4_UDP, - [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE | - RTE_PTYPE_L4_UDP, - [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE, - [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] = - RTE_PTYPE_TUNNEL_MPLS_IN_GRE, - [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] = - RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP, -}; - -/** Structure to generate a simple graph of layers supported by the NIC. */ -struct mlx5_flow_items { - /** List of possible actions for these items. */ - const enum rte_flow_action_type *const actions; - /** Bit-masks corresponding to the possibilities for the item. */ - const void *mask; - /** - * Default bit-masks to use when item->mask is not provided. When - * \default_mask is also NULL, the full supported bit-mask (\mask) is - * used instead. - */ - const void *default_mask; - /** Bit-masks size in bytes. */ - const unsigned int mask_sz; - /** - * Conversion function from rte_flow to NIC specific flow. - * - * @param item - * rte_flow item to convert. - * @param default_mask - * Default bit-masks to use when item->mask is not provided. - * @param data - * Internal structure to store the conversion. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is - * set. - */ - int (*convert)(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data); - /** Size in bytes of the destination structure. */ - const unsigned int dst_sz; - /** List of possible following items. */ - const enum rte_flow_item_type *const items; -}; - -/** Valid action for this PMD. */ -static const enum rte_flow_action_type valid_actions[] = { - RTE_FLOW_ACTION_TYPE_DROP, - RTE_FLOW_ACTION_TYPE_QUEUE, - RTE_FLOW_ACTION_TYPE_MARK, - RTE_FLOW_ACTION_TYPE_FLAG, -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - RTE_FLOW_ACTION_TYPE_COUNT, -#endif - RTE_FLOW_ACTION_TYPE_END, -}; - -/** Graph of supported items and associated actions. 
*/ -static const struct mlx5_flow_items mlx5_flow_items[] = { - [RTE_FLOW_ITEM_TYPE_END] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_VXLAN_GPE, - RTE_FLOW_ITEM_TYPE_GRE), + [MLX5_EXPANSION_OUTER_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_GRE), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, }, - [RTE_FLOW_ITEM_TYPE_ETH] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_eth){ - .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", - .type = -1, - }, - .default_mask = &rte_flow_item_eth_mask, - .mask_sz = sizeof(struct rte_flow_item_eth), - .convert = mlx5_flow_create_eth, - .dst_sz = sizeof(struct ibv_flow_spec_eth), + [MLX5_EXPANSION_OUTER_IPV4_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, }, - [RTE_FLOW_ITEM_TYPE_VLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_vlan){ - .tci = -1, - .inner_type = -1, - }, - .default_mask = &rte_flow_item_vlan_mask, - .mask_sz = sizeof(struct rte_flow_item_vlan), - .convert = mlx5_flow_create_vlan, - .dst_sz = 0, + [MLX5_EXPANSION_OUTER_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, }, - [RTE_FLOW_ITEM_TYPE_IPV4] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_GRE), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_ipv4){ - .hdr = { - .src_addr = -1, - .dst_addr = -1, - .type_of_service = -1, - .next_proto_id = -1, - }, - }, - .default_mask = &rte_flow_item_ipv4_mask, - .mask_sz = sizeof(struct rte_flow_item_ipv4), - .convert = mlx5_flow_create_ipv4, - .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext), + [MLX5_EXPANSION_OUTER_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, }, - [RTE_FLOW_ITEM_TYPE_IPV6] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP, - RTE_FLOW_ITEM_TYPE_GRE), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_ipv6){ - .hdr = { - .src_addr = { - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - }, - .dst_addr = { - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - }, - .vtc_flow = -1, - .proto = -1, - .hop_limits = -1, - }, - }, - .default_mask = &rte_flow_item_ipv6_mask, - .mask_sz = sizeof(struct rte_flow_item_ipv6), - .convert = mlx5_flow_create_ipv6, - .dst_sz = sizeof(struct ibv_flow_spec_ipv6), + [MLX5_EXPANSION_OUTER_IPV6_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, }, - [RTE_FLOW_ITEM_TYPE_UDP] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_VXLAN_GPE, - RTE_FLOW_ITEM_TYPE_MPLS), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_udp){ - .hdr = { - .src_port = -1, - .dst_port = -1, 
- }, - }, - .default_mask = &rte_flow_item_udp_mask, - .mask_sz = sizeof(struct rte_flow_item_udp), - .convert = mlx5_flow_create_udp, - .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + [MLX5_EXPANSION_OUTER_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, }, - [RTE_FLOW_ITEM_TYPE_TCP] = { - .actions = valid_actions, - .mask = &(const struct rte_flow_item_tcp){ - .hdr = { - .src_port = -1, - .dst_port = -1, - }, - }, - .default_mask = &rte_flow_item_tcp_mask, - .mask_sz = sizeof(struct rte_flow_item_tcp), - .convert = mlx5_flow_create_tcp, - .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + [MLX5_EXPANSION_VXLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, }, - [RTE_FLOW_ITEM_TYPE_GRE] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6, - RTE_FLOW_ITEM_TYPE_MPLS), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_gre){ - .protocol = -1, - }, - .default_mask = &rte_flow_item_gre_mask, - .mask_sz = sizeof(struct rte_flow_item_gre), - .convert = mlx5_flow_create_gre, -#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT - .dst_sz = sizeof(struct ibv_flow_spec_gre), -#else - .dst_sz = sizeof(struct ibv_flow_spec_tunnel), -#endif + [MLX5_EXPANSION_VXLAN_GPE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, }, - [RTE_FLOW_ITEM_TYPE_MPLS] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_mpls){ - .label_tc_s = "\xff\xff\xf0", - }, - .default_mask = &rte_flow_item_mpls_mask, - .mask_sz = sizeof(struct rte_flow_item_mpls), - .convert = mlx5_flow_create_mpls, -#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT - .dst_sz = sizeof(struct ibv_flow_spec_mpls), -#endif + [MLX5_EXPANSION_GRE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), + .type = RTE_FLOW_ITEM_TYPE_GRE, }, - [RTE_FLOW_ITEM_TYPE_VXLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, /* For L3 VXLAN. */ - RTE_FLOW_ITEM_TYPE_IPV6), /* For L3 VXLAN. 
*/ - .actions = valid_actions, - .mask = &(const struct rte_flow_item_vxlan){ - .vni = "\xff\xff\xff", - }, - .default_mask = &rte_flow_item_vxlan_mask, - .mask_sz = sizeof(struct rte_flow_item_vxlan), - .convert = mlx5_flow_create_vxlan, - .dst_sz = sizeof(struct ibv_flow_spec_tunnel), + [MLX5_EXPANSION_MPLS] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_MPLS, }, - [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), - .actions = valid_actions, - .mask = &(const struct rte_flow_item_vxlan_gpe){ - .vni = "\xff\xff\xff", - }, - .default_mask = &rte_flow_item_vxlan_gpe_mask, - .mask_sz = sizeof(struct rte_flow_item_vxlan_gpe), - .convert = mlx5_flow_create_vxlan_gpe, - .dst_sz = sizeof(struct ibv_flow_spec_tunnel), + [MLX5_EXPANSION_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, + }, + [MLX5_EXPANSION_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, + }, + [MLX5_EXPANSION_IPV4_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + }, + [MLX5_EXPANSION_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + }, + [MLX5_EXPANSION_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, + }, + [MLX5_EXPANSION_IPV6_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + }, + [MLX5_EXPANSION_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, }, }; -/** Structure to pass to the conversion function. */ -struct mlx5_flow_parse { - uint32_t inner; /**< Verbs value, set once tunnel is encountered. */ - uint32_t create:1; - /**< Whether resources should remain after a validate. */ - uint32_t drop:1; /**< Target is a drop queue. */ - uint32_t mark:1; /**< Mark is present in the flow. */ - uint32_t count:1; /**< Count is present in the flow. */ - uint32_t mark_id; /**< Mark identifier. */ - struct rte_flow_action_rss rss_conf; /**< RSS configuration */ - uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */ - uint8_t rss_key[40]; /**< copy of the RSS key. */ - enum hash_rxq_type layer; /**< Last pattern layer detected. */ - enum hash_rxq_type out_layer; /**< Last outer pattern layer detected. */ - uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */ - struct ibv_counter_set *cs; /**< Holds the counter set for the rule */ +/** Handles information leading to a drop fate. */ +struct mlx5_flow_verbs { + LIST_ENTRY(mlx5_flow_verbs) next; + unsigned int size; /**< Size of the attribute. */ struct { - struct ibv_flow_attr *ibv_attr; - /**< Pointer to Verbs attributes. */ - unsigned int offset; - /**< Current position or total size of the attribute. */ - uint64_t hash_fields; /**< Verbs hash fields. 
*/ - } queue[RTE_DIM(hash_rxq_init)]; + struct ibv_flow_attr *attr; + /**< Pointer to the Specification buffer. */ + uint8_t *specs; /**< Pointer to the specifications. */ + }; + struct ibv_flow *flow; /**< Verbs flow pointer. */ + struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */ + uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */ +}; + +/* Counters information. */ +struct mlx5_flow_counter { + LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */ + uint32_t shared:1; /**< Share counter ID with other flow rules. */ + uint32_t ref_cnt:31; /**< Reference counter. */ + uint32_t id; /**< Counter ID. */ + struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ + uint64_t hits; /**< Number of packets matched by the rule. */ + uint64_t bytes; /**< Number of bytes matched by the rule. */ +}; + +/* Flow structure. */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + struct rte_flow_attr attributes; /**< User flow attribute. */ + uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */ + uint32_t layers; + /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */ + uint32_t modifier; + /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */ + uint32_t fate; + /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */ + uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */ + LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */ + struct mlx5_flow_verbs *cur_verbs; + /**< Current Verbs flow structure being filled. */ + struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */ + struct rte_flow_action_rss rss;/**< RSS context. */ + uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ + uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */ + void *nl_flow; /**< Netlink flow buffer if relevant. */ }; static const struct rte_flow_ops mlx5_flow_ops = { @@ -529,12 +322,8 @@ static const struct rte_flow_ops mlx5_flow_ops = { .create = mlx5_flow_create, .destroy = mlx5_flow_destroy, .flush = mlx5_flow_flush, -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - .query = mlx5_flow_query, -#else - .query = NULL, -#endif .isolate = mlx5_flow_isolate, + .query = mlx5_flow_query, }; /* Convert FDIR request to Generic flow. */ @@ -569,883 +358,459 @@ struct ibv_spec_header { uint16_t size; }; -/** - * Check item is fully supported by the NIC matching capability. - * - * @param item[in] - * Item specification. - * @param mask[in] - * Bit-masks covering supported fields to compare with spec, last and mask in - * \item. - * @param size - * Bit-Mask size in bytes. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. +/* + * Number of sub priorities. + * For each kind of pattern matching i.e. L2, L3, L4 to have a correct + * matching on the NIC (firmware dependent) L4 most have the higher priority + * followed by L3 and ending with L2. */ -static int -mlx5_flow_item_validate(const struct rte_flow_item *item, - const uint8_t *mask, unsigned int size) -{ - unsigned int i; - const uint8_t *spec = item->spec; - const uint8_t *last = item->last; - const uint8_t *m = item->mask ? item->mask : mask; +#define MLX5_PRIORITY_MAP_L2 2 +#define MLX5_PRIORITY_MAP_L3 1 +#define MLX5_PRIORITY_MAP_L4 0 +#define MLX5_PRIORITY_MAP_MAX 3 + +/* Map of Verbs to Flow priority with 8 Verbs priorities. 
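+ * For instance, a flow at user priority 1 whose sub-priority is MLX5_PRIORITY_MAP_L3 maps to priority_map_3[1][1], i.e. Verbs priority 3, while an L2-only match at the same user priority uses 4.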
*/ +static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, +}; - if (!spec && (item->mask || last)) - goto error; - if (!spec) - return 0; - /* - * Single-pass check to make sure that: - * - item->mask is supported, no bits are set outside mask. - * - Both masked item->spec and item->last are equal (no range - * supported). - */ - for (i = 0; i < size; i++) { - if (!m[i]) - continue; - if ((m[i] | mask[i]) != mask[i]) - goto error; - if (last && ((spec[i] & m[i]) != (last[i] & m[i]))) - goto error; - } - return 0; -error: - rte_errno = ENOTSUP; - return -rte_errno; -} +/* Map of Verbs to Flow priority with 16 Verbs priorities. */ +static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, + { 9, 10, 11 }, { 12, 13, 14 }, +}; + +/* Tunnel information. */ +struct mlx5_flow_tunnel_info { + uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ + uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */ +}; + +static struct mlx5_flow_tunnel_info tunnels_info[] = { + { + .tunnel = MLX5_FLOW_LAYER_VXLAN, + .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE, + .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_GRE, + .ptype = RTE_PTYPE_TUNNEL_GRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, + }, +}; /** - * Extract attribute to the parser. + * Discover the maximum number of priority available. * - * @param[in] attr - * Flow rule attributes. - * @param[out] error - * Perform verbose error reporting if not NULL. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * number of supported flow priority on success, a negative errno + * value otherwise and rte_errno is set. 
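+ * The value is discovered by creating throwaway drop flows at Verbs priorities 7 and 15; the highest that succeeds selects between priority_map_3 and priority_map_5.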
*/ -static int -mlx5_flow_convert_attributes(const struct rte_flow_attr *attr, - struct rte_flow_error *error) +int +mlx5_flow_discover_priorities(struct rte_eth_dev *dev) { - if (attr->group) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - NULL, - "groups are not supported"); - return -rte_errno; - } - if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - NULL, - "priorities are not supported"); - return -rte_errno; - } - if (attr->egress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - NULL, - "egress is not supported"); + struct { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth eth; + struct ibv_flow_spec_action_drop drop; + } flow_attr = { + .attr = { + .num_of_specs = 2, + }, + .eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(struct ibv_flow_spec_eth), + }, + .drop = { + .size = sizeof(struct ibv_flow_spec_action_drop), + .type = IBV_FLOW_SPEC_ACTION_DROP, + }, + }; + struct ibv_flow *flow; + struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); + uint16_t vprio[] = { 8, 16 }; + int i; + int priority = 0; + + if (!drop) { + rte_errno = ENOTSUP; return -rte_errno; } - if (attr->transfer) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, - NULL, - "transfer is not supported"); - return -rte_errno; + for (i = 0; i != RTE_DIM(vprio); i++) { + flow_attr.attr.priority = vprio[i] - 1; + flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); + if (!flow) + break; + claim_zero(mlx5_glue->destroy_flow(flow)); + priority = vprio[i]; } - if (!attr->ingress) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "only ingress is supported"); + switch (priority) { + case 8: + priority = RTE_DIM(priority_map_3); + break; + case 16: + priority = RTE_DIM(priority_map_5); + break; + default: + rte_errno = ENOTSUP; + DRV_LOG(ERR, + "port %u verbs maximum priority: %d expected 8/16", + dev->data->port_id, vprio[i]); return -rte_errno; } - return 0; + mlx5_hrxq_drop_release(dev); + DRV_LOG(INFO, "port %u flow maximum priority: %d", + dev->data->port_id, priority); + return priority; } /** - * Extract actions request to the parser. + * Adjust flow priority. * * @param dev * Pointer to Ethernet device. - * @param[in] actions - * Associated actions (list terminated by the END action). - * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * @param flow + * Pointer to an rte flow. 
*/ -static int -mlx5_flow_convert_actions(struct rte_eth_dev *dev, - const struct rte_flow_action actions[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +static void +mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow) { - enum { FATE = 1, MARK = 2, COUNT = 4, }; - uint32_t overlap = 0; struct priv *priv = dev->data->dev_private; + uint32_t priority = flow->attributes.priority; + uint32_t subpriority = flow->cur_verbs->attr->priority; - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { - if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { - continue; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) { - if (overlap & FATE) - goto exit_action_overlap; - overlap |= FATE; - parser->drop = 1; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - const struct rte_flow_action_queue *queue = - (const struct rte_flow_action_queue *) - actions->conf; - - if (overlap & FATE) - goto exit_action_overlap; - overlap |= FATE; - if (!queue || (queue->index > (priv->rxqs_n - 1))) - goto exit_action_not_supported; - parser->queues[0] = queue->index; - parser->rss_conf = (struct rte_flow_action_rss){ - .queue_num = 1, - .queue = parser->queues, - }; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) { - const struct rte_flow_action_rss *rss = - (const struct rte_flow_action_rss *) - actions->conf; - const uint8_t *rss_key; - uint32_t rss_key_len; - uint16_t n; - - if (overlap & FATE) - goto exit_action_overlap; - overlap |= FATE; - if (rss->func && - rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "the only supported RSS hash" - " function is Toeplitz"); - return -rte_errno; - } -#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - if (parser->rss_conf.level > 1) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "a nonzero RSS encapsulation" - " level is not supported"); - return -rte_errno; - } -#endif - if (parser->rss_conf.level > 2) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "RSS encapsulation level" - " > 1 is not supported"); - return -rte_errno; - } - if (rss->types & MLX5_RSS_HF_MASK) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "unsupported RSS type" - " requested"); - return -rte_errno; - } - if (rss->key_len) { - rss_key_len = rss->key_len; - rss_key = rss->key; - } else { - rss_key_len = rss_hash_default_key_len; - rss_key = rss_hash_default_key; - } - if (rss_key_len != RTE_DIM(parser->rss_key)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "RSS hash key must be" - " exactly 40 bytes long"); - return -rte_errno; - } - if (!rss->queue_num) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "no valid queues"); - return -rte_errno; - } - if (rss->queue_num > RTE_DIM(parser->queues)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "too many queues for RSS" - " context"); - return -rte_errno; - } - for (n = 0; n < rss->queue_num; ++n) { - if (rss->queue[n] >= priv->rxqs_n) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue id > number of" - " queues"); - return -rte_errno; - } - } - parser->rss_conf = (struct rte_flow_action_rss){ - .func = RTE_ETH_HASH_FUNCTION_DEFAULT, - .level = rss->level ? 
rss->level : 1, - .types = rss->types, - .key_len = rss_key_len, - .queue_num = rss->queue_num, - .key = memcpy(parser->rss_key, rss_key, - sizeof(*rss_key) * rss_key_len), - .queue = memcpy(parser->queues, rss->queue, - sizeof(*rss->queue) * - rss->queue_num), - }; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) { - const struct rte_flow_action_mark *mark = - (const struct rte_flow_action_mark *) - actions->conf; - - if (overlap & MARK) - goto exit_action_overlap; - overlap |= MARK; - if (!mark) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "mark must be defined"); - return -rte_errno; - } else if (mark->id >= MLX5_FLOW_MARK_MAX) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "mark must be between 0" - " and 16777199"); - return -rte_errno; - } - parser->mark = 1; - parser->mark_id = mark->id; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) { - if (overlap & MARK) - goto exit_action_overlap; - overlap |= MARK; - parser->mark = 1; - } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT && - priv->config.flow_counter_en) { - if (overlap & COUNT) - goto exit_action_overlap; - overlap |= COUNT; - parser->count = 1; - } else { - goto exit_action_not_supported; - } - } - /* When fate is unknown, drop traffic. */ - if (!(overlap & FATE)) - parser->drop = 1; - if (parser->drop && parser->mark) - parser->mark = 0; - if (!parser->rss_conf.queue_num && !parser->drop) { - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "no valid action"); - return -rte_errno; + switch (priv->config.flow_prio) { + case RTE_DIM(priority_map_3): + priority = priority_map_3[priority][subpriority]; + break; + case RTE_DIM(priority_map_5): + priority = priority_map_5[priority][subpriority]; + break; } - return 0; -exit_action_not_supported: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, - actions, "action not supported"); - return -rte_errno; -exit_action_overlap: - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, - actions, "overlapping actions are not supported"); - return -rte_errno; + flow->cur_verbs->attr->priority = priority; } /** - * Validate items. + * Get a flow counter. * - * @param[in] items - * Pattern specification (list terminated by the END pattern item). - * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] shared + * Indicate if this counter is shared with other flows. + * @param[in] id + * Counter identifier. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * A pointer to the counter, NULL otherwise and rte_errno is set. */ -static int -mlx5_flow_convert_items_validate(struct rte_eth_dev *dev, - const struct rte_flow_item items[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +static struct mlx5_flow_counter * +mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) { struct priv *priv = dev->data->dev_private; - const struct mlx5_flow_items *cur_item = mlx5_flow_items; - unsigned int i; - unsigned int last_voids = 0; - int ret = 0; - - /* Initialise the offsets to start after verbs attribute. 
*/ - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset = sizeof(struct ibv_flow_attr); - for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { - const struct mlx5_flow_items *token = NULL; - unsigned int n; + struct mlx5_flow_counter *cnt; - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) { - last_voids++; + LIST_FOREACH(cnt, &priv->flow_counters, next) { + if (!cnt->shared || cnt->shared != shared) continue; - } - for (i = 0; - cur_item->items && - cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END; - ++i) { - if (cur_item->items[i] == items->type) { - token = &mlx5_flow_items[items->type]; - break; - } - } - if (!token) { - ret = -ENOTSUP; - goto exit_item_not_supported; - } - cur_item = token; - ret = mlx5_flow_item_validate(items, - (const uint8_t *)cur_item->mask, - cur_item->mask_sz); - if (ret) - goto exit_item_not_supported; - if (IS_TUNNEL(items->type)) { - if (parser->tunnel && - !((items - last_voids - 1)->type == - RTE_FLOW_ITEM_TYPE_GRE && items->type == - RTE_FLOW_ITEM_TYPE_MPLS)) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - items, - "Cannot recognize multiple" - " tunnel encapsulations."); - return -rte_errno; - } - if (items->type == RTE_FLOW_ITEM_TYPE_MPLS && - !priv->config.mpls_en) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - items, - "MPLS not supported or" - " disabled in firmware" - " configuration."); - return -rte_errno; - } - if (!priv->config.tunnel_en && - parser->rss_conf.level > 1) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - items, - "RSS on tunnel is not supported"); - return -rte_errno; - } - parser->inner = IBV_FLOW_SPEC_INNER; - parser->tunnel = flow_ptype[items->type]; - } - if (parser->drop) { - parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz; - } else { - for (n = 0; n != hash_rxq_init_n; ++n) - parser->queue[n].offset += cur_item->dst_sz; - } - last_voids = 0; - } - if (parser->drop) { - parser->queue[HASH_RXQ_ETH].offset += - sizeof(struct ibv_flow_spec_action_drop); - } - if (parser->mark) { - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset += - sizeof(struct ibv_flow_spec_action_tag); - } - if (parser->count) { - unsigned int size = sizeof(struct ibv_flow_spec_counter_action); - - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset += size; + if (cnt->id != id) + continue; + cnt->ref_cnt++; + return cnt; } - return 0; -exit_item_not_supported: - return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); -} +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT -/** - * Allocate memory space to store verbs flow attributes. - * - * @param[in] size - * Amount of byte to allocate. - * @param[out] error - * Perform verbose error reporting if not NULL. - * - * @return - * A verbs flow attribute on success, NULL otherwise and rte_errno is set. 
- */ -static struct ibv_flow_attr * -mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error) -{ - struct ibv_flow_attr *ibv_attr; + struct mlx5_flow_counter tmpl = { + .shared = shared, + .id = id, + .cs = mlx5_glue->create_counter_set + (priv->ctx, + &(struct ibv_counter_set_init_attr){ + .counter_set_id = id, + }), + .hits = 0, + .bytes = 0, + }; - ibv_attr = rte_calloc(__func__, 1, size, 0); - if (!ibv_attr) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot allocate verbs spec attributes"); + if (!tmpl.cs) { + rte_errno = errno; return NULL; } - return ibv_attr; -} - -/** - * Make inner packet matching with an higher priority from the non Inner - * matching. - * - * @param dev - * Pointer to Ethernet device. - * @param[in, out] parser - * Internal parser structure. - * @param attr - * User flow attribute. - */ -static void -mlx5_flow_update_priority(struct rte_eth_dev *dev, - struct mlx5_flow_parse *parser, - const struct rte_flow_attr *attr) -{ - struct priv *priv = dev->data->dev_private; - unsigned int i; - uint16_t priority; - - /* 8 priorities >= 16 priorities - * Control flow: 4-7 8-15 - * User normal flow: 1-3 4-7 - * User tunnel flow: 0-2 0-3 - */ - priority = attr->priority * MLX5_VERBS_FLOW_PRIO_8; - if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8) - priority /= 2; - /* - * Lower non-tunnel flow Verbs priority 1 if only support 8 Verbs - * priorities, lower 4 otherwise. - */ - if (!parser->inner) { - if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8) - priority += 1; - else - priority += MLX5_VERBS_FLOW_PRIO_8 / 2; - } - if (parser->drop) { - parser->queue[HASH_RXQ_ETH].ibv_attr->priority = priority + - hash_rxq_init[HASH_RXQ_ETH].flow_priority; - return; - } - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - parser->queue[i].ibv_attr->priority = priority + - hash_rxq_init[i].flow_priority; + cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0); + if (!cnt) { + rte_errno = ENOMEM; + return NULL; } + *cnt = tmpl; + LIST_INSERT_HEAD(&priv->flow_counters, cnt, next); + return cnt; +#endif + rte_errno = ENOTSUP; + return NULL; } /** - * Finalise verbs flow attributes. + * Release a flow counter. * - * @param[in, out] parser - * Internal parser structure. + * @param[in] counter + * Pointer to the counter handler. */ static void -mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser) +mlx5_flow_counter_release(struct mlx5_flow_counter *counter) { - unsigned int i; - uint32_t inner = parser->inner; - - /* Don't create extra flows for outer RSS. */ - if (parser->tunnel && parser->rss_conf.level < 2) - return; - /* - * Fill missing layers in verbs specifications, or compute the correct - * offset to allocate the memory space for the attributes and - * specifications. 
- */ - for (i = 0; i != hash_rxq_init_n - 1; ++i) { - union { - struct ibv_flow_spec_ipv4_ext ipv4; - struct ibv_flow_spec_ipv6 ipv6; - struct ibv_flow_spec_tcp_udp udp_tcp; - struct ibv_flow_spec_eth eth; - } specs; - void *dst; - uint16_t size; - - if (i == parser->layer) - continue; - if (parser->layer == HASH_RXQ_ETH || - parser->layer == HASH_RXQ_TUNNEL) { - if (hash_rxq_init[i].ip_version == MLX5_IPV4) { - size = sizeof(struct ibv_flow_spec_ipv4_ext); - specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){ - .type = inner | IBV_FLOW_SPEC_IPV4_EXT, - .size = size, - }; - } else { - size = sizeof(struct ibv_flow_spec_ipv6); - specs.ipv6 = (struct ibv_flow_spec_ipv6){ - .type = inner | IBV_FLOW_SPEC_IPV6, - .size = size, - }; - } - if (parser->queue[i].ibv_attr) { - dst = (void *)((uintptr_t) - parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, &specs, size); - ++parser->queue[i].ibv_attr->num_of_specs; - } - parser->queue[i].offset += size; - } - if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) || - (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) { - size = sizeof(struct ibv_flow_spec_tcp_udp); - specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) { - .type = inner | ((i == HASH_RXQ_UDPV4 || - i == HASH_RXQ_UDPV6) ? - IBV_FLOW_SPEC_UDP : - IBV_FLOW_SPEC_TCP), - .size = size, - }; - if (parser->queue[i].ibv_attr) { - dst = (void *)((uintptr_t) - parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, &specs, size); - ++parser->queue[i].ibv_attr->num_of_specs; - } - parser->queue[i].offset += size; - } + if (--counter->ref_cnt == 0) { + claim_zero(mlx5_glue->destroy_counter_set(counter->cs)); + LIST_REMOVE(counter, next); + rte_free(counter); } } /** - * Update flows according to pattern and RSS hash fields. + * Verify the @p attributes will be correctly understood by the NIC and store + * them in the @p flow if everything is correct. * - * @param[in, out] parser - * Internal parser structure. + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] attributes + * Pointer to flow attributes + * @param[in, out] flow + * Pointer to the rte_flow structure. + * @param[out] error + * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_flow_convert_rss(struct mlx5_flow_parse *parser) +mlx5_flow_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + struct rte_flow *flow, + struct rte_flow_error *error) { - unsigned int i; - enum hash_rxq_type start; - enum hash_rxq_type layer; - int outer = parser->tunnel && parser->rss_conf.level < 2; - uint64_t rss = parser->rss_conf.types; - - layer = outer ? parser->out_layer : parser->layer; - if (layer == HASH_RXQ_TUNNEL) - layer = HASH_RXQ_ETH; - if (outer) { - /* Only one hash type for outer RSS. */ - if (rss && layer == HASH_RXQ_ETH) { - start = HASH_RXQ_TCPV4; - } else if (rss && layer != HASH_RXQ_ETH && - !(rss & hash_rxq_init[layer].dpdk_rss_hf)) { - /* If RSS not match L4 pattern, try L3 RSS. */ - if (layer < HASH_RXQ_IPV4) - layer = HASH_RXQ_IPV4; - else if (layer > HASH_RXQ_IPV4 && layer < HASH_RXQ_IPV6) - layer = HASH_RXQ_IPV6; - start = layer; - } else { - start = layer; - } - /* Scan first valid hash type. */ - for (i = start; rss && i <= layer; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - if (hash_rxq_init[i].dpdk_rss_hf & rss) - break; - } - if (rss && i <= layer) - parser->queue[layer].hash_fields = - hash_rxq_init[i].hash_fields; - /* Trim unused hash types. 
*/ - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr && i != layer) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } - } else { - /* Expand for inner or normal RSS. */ - if (rss && (layer == HASH_RXQ_ETH || layer == HASH_RXQ_IPV4)) - start = HASH_RXQ_TCPV4; - else if (rss && layer == HASH_RXQ_IPV6) - start = HASH_RXQ_TCPV6; - else - start = layer; - /* For L4 pattern, try L3 RSS if no L4 RSS. */ - /* Trim unused hash types. */ - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - if (i < start || i > layer) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - continue; - } - if (!rss) - continue; - if (hash_rxq_init[i].dpdk_rss_hf & rss) { - parser->queue[i].hash_fields = - hash_rxq_init[i].hash_fields; - } else if (i != layer) { - /* Remove unused RSS expansion. */ - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } else if (layer < HASH_RXQ_IPV4 && - (hash_rxq_init[HASH_RXQ_IPV4].dpdk_rss_hf & - rss)) { - /* Allow IPv4 RSS on L4 pattern. */ - parser->queue[i].hash_fields = - hash_rxq_init[HASH_RXQ_IPV4] - .hash_fields; - } else if (i > HASH_RXQ_IPV4 && i < HASH_RXQ_IPV6 && - (hash_rxq_init[HASH_RXQ_IPV6].dpdk_rss_hf & - rss)) { - /* Allow IPv4 RSS on L4 pattern. */ - parser->queue[i].hash_fields = - hash_rxq_init[HASH_RXQ_IPV6] - .hash_fields; - } - } - } + uint32_t priority_max = + ((struct priv *)dev->data->dev_private)->config.flow_prio - 1; + + if (attributes->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "groups is not supported"); + if (attributes->priority != MLX5_FLOW_PRIO_RSVD && + attributes->priority >= priority_max) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priority out of range"); + if (attributes->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "egress is not supported"); + if (attributes->transfer) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + if (!attributes->ingress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "ingress attribute is mandatory"); + flow->attributes = *attributes; + if (attributes->priority == MLX5_FLOW_PRIO_RSVD) + flow->attributes.priority = priority_max; return 0; } /** - * Validate and convert a flow supported by the NIC. + * Verify the @p item specifications (spec, last, mask) are compatible with the + * NIC capabilities. * - * @param dev - * Pointer to Ethernet device. - * @param[in] attr - * Flow rule attributes. - * @param[in] pattern - * Pattern specification (list terminated by the END pattern item). - * @param[in] actions - * Associated actions (list terminated by the END action). + * @param[in] item + * Item specification. + * @param[in] mask + * @p item->mask or flow default bit-masks. + * @param[in] nic_mask + * Bit-masks covering supported fields by the NIC to compare with user mask. + * @param[in] size + * Bit-masks size in bytes. * @param[out] error - * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. + * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
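The attribute checks above boil down to: only ingress rules, no groups, egress or transfer, and a priority either below the device limit or equal to a reserved value that is remapped to the lowest priority. A self-contained sketch of that decision, where check_attributes(), flow_attr, PRIO_RSVD and prio_max are illustrative stand-ins for the driver's types and configuration:

#include <stdint.h>
#include <stdio.h>

#define PRIO_RSVD UINT32_MAX   /* stand-in for MLX5_FLOW_PRIO_RSVD */

struct flow_attr {             /* stand-in for struct rte_flow_attr */
	uint32_t group;
	uint32_t priority;
	uint32_t ingress:1;
	uint32_t egress:1;
	uint32_t transfer:1;
};

/* Return 0 and the effective priority, or a negative errno-style code. */
static int
check_attributes(const struct flow_attr *attr, uint32_t prio_max,
		 uint32_t *effective_prio)
{
	if (attr->group || attr->egress || attr->transfer || !attr->ingress)
		return -1;                    /* only plain ingress rules */
	if (attr->priority == PRIO_RSVD)
		*effective_prio = prio_max;   /* remap the reserved value */
	else if (attr->priority >= prio_max)
		return -1;                    /* priority out of range */
	else
		*effective_prio = attr->priority;
	return 0;
}

int
main(void)
{
	struct flow_attr attr = { .priority = PRIO_RSVD, .ingress = 1 };
	uint32_t prio;

	if (!check_attributes(&attr, 15, &prio))
		printf("accepted, effective priority %u\n", prio);
	return 0;
}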
*/ static int -mlx5_flow_convert(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +mlx5_flow_item_acceptable(const struct rte_flow_item *item, + const uint8_t *mask, + const uint8_t *nic_mask, + unsigned int size, + struct rte_flow_error *error) { - const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; - int ret; - /* First step. Validate the attributes, items and actions. */ - *parser = (struct mlx5_flow_parse){ - .create = parser->create, - .layer = HASH_RXQ_ETH, - .mark_id = MLX5_FLOW_MARK_DEFAULT, - }; - ret = mlx5_flow_convert_attributes(attr, error); - if (ret) - return ret; - ret = mlx5_flow_convert_actions(dev, actions, error, parser); - if (ret) - return ret; - ret = mlx5_flow_convert_items_validate(dev, items, error, parser); - if (ret) - return ret; - mlx5_flow_convert_finalise(parser); - /* - * Second step. - * Allocate the memory space to store verbs specifications. - */ - if (parser->drop) { - unsigned int offset = parser->queue[HASH_RXQ_ETH].offset; - - parser->queue[HASH_RXQ_ETH].ibv_attr = - mlx5_flow_convert_allocate(offset, error); - if (!parser->queue[HASH_RXQ_ETH].ibv_attr) - goto exit_enomem; - parser->queue[HASH_RXQ_ETH].offset = - sizeof(struct ibv_flow_attr); - } else { - for (i = 0; i != hash_rxq_init_n; ++i) { - unsigned int offset; - - offset = parser->queue[i].offset; - parser->queue[i].ibv_attr = - mlx5_flow_convert_allocate(offset, error); - if (!parser->queue[i].ibv_attr) - goto exit_enomem; - parser->queue[i].offset = sizeof(struct ibv_flow_attr); - } - } - /* Third step. Conversion parse, fill the specifications. */ - parser->inner = 0; - parser->tunnel = 0; - parser->layer = HASH_RXQ_ETH; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { - struct mlx5_flow_data data = { - .dev = dev, - .parser = parser, - .error = error, - }; + assert(nic_mask); + for (i = 0; i < size; ++i) + if ((nic_mask[i] | mask[i]) != nic_mask[i]) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "mask enables non supported" + " bits"); + if (!item->spec && (item->mask || item->last)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "mask/last without a spec is not" + " supported"); + if (item->spec && item->last) { + uint8_t spec[size]; + uint8_t last[size]; + unsigned int i; + int ret; - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) - continue; - cur_item = &mlx5_flow_items[items->type]; - ret = cur_item->convert(items, - (cur_item->default_mask ? - cur_item->default_mask : - cur_item->mask), - &data); - if (ret) - goto exit_free; - } - if (!parser->drop) { - /* RSS check, remove unused hash types. */ - ret = mlx5_flow_convert_rss(parser); - if (ret) - goto exit_free; - /* Complete missing specification. */ - mlx5_flow_convert_finalise(parser); - } - mlx5_flow_update_priority(dev, parser, attr); - if (parser->mark) - mlx5_flow_create_flag_mark(parser, parser->mark_id); - if (parser->count && parser->create) { - mlx5_flow_create_count(dev, parser); - if (!parser->cs) - goto exit_count_error; - } -exit_free: - /* Only verification is expected, all resources should be released. 
*/ - if (!parser->create) { - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } - } - return ret; -exit_enomem: - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser->queue[i].ibv_attr) { - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; + for (i = 0; i < size; ++i) { + spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; + last[i] = ((const uint8_t *)item->last)[i] & mask[i]; } + ret = memcmp(spec, last, size); + if (ret != 0) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "range is not supported"); } - rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot allocate verbs spec attributes"); - return -rte_errno; -exit_count_error: - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create counter"); - return -rte_errno; + return 0; } /** - * Copy the specification created into the flow. + * Add a verbs item specification into @p flow. * - * @param parser - * Internal parser structure. - * @param src + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] src * Create specification. - * @param size + * @param[in] size * Size in bytes of the specification to copy. */ static void -mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, - unsigned int size) +mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size) { - unsigned int i; - void *dst; + struct mlx5_flow_verbs *verbs = flow->cur_verbs; - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - dst = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset); + if (verbs->specs) { + void *dst; + + dst = (void *)(verbs->specs + verbs->size); memcpy(dst, src, size); - ++parser->queue[i].ibv_attr->num_of_specs; - parser->queue[i].offset += size; + ++verbs->attr->num_of_specs; } + verbs->size += size; +} + +/** + * Adjust verbs hash fields according to the @p flow information. + * + * @param[in, out] flow. + * Pointer to flow structure. + * @param[in] tunnel + * 1 when the hash field is for a tunnel item. + * @param[in] layer_types + * ETH_RSS_* types. + * @param[in] hash_fields + * Item hash fields. + */ +static void +mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, + int tunnel __rte_unused, + uint32_t layer_types, uint64_t hash_fields) +{ +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0); + if (flow->rss.level == 2 && !tunnel) + hash_fields = 0; + else if (flow->rss.level < 2 && tunnel) + hash_fields = 0; +#endif + if (!(flow->rss.types & layer_types)) + hash_fields = 0; + flow->cur_verbs->hash_fields |= hash_fields; } /** - * Convert Ethernet item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. 
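mlx5_flow_item_acceptable() reduces to a few byte-wise tests: the user mask must be a subset of the NIC mask, a mask or last without a spec is refused, and a spec/last pair must describe a single value once masked, since ranges are not supported. A minimal standalone version of those tests, with names chosen for illustration only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return 0 when the item is acceptable, -1 otherwise. */
static int
item_acceptable(const uint8_t *spec, const uint8_t *last,
		const uint8_t *mask, const uint8_t *nic_mask, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return -1;      /* mask enables unsupported bits */
	if (!spec && (mask || last))
		return -1;              /* mask/last without a spec */
	if (spec && last) {
		for (i = 0; i < size; ++i)
			if ((spec[i] & mask[i]) != (last[i] & mask[i]))
				return -1;   /* ranges are not supported */
	}
	return 0;
}

int
main(void)
{
	uint8_t nic_mask[2] = { 0xff, 0x0f };
	uint8_t mask[2] = { 0xff, 0xf0 };  /* touches bits the NIC ignores */
	uint8_t spec[2] = { 0x12, 0x30 };

	printf("%d\n", item_acceptable(spec, NULL, mask, nic_mask, 2));
	return 0;
}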
* * @return - 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_eth(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_eth *spec = item->spec; const struct rte_flow_item_eth *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; - const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); + const struct rte_flow_item_eth nic_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .type = RTE_BE16(0xffff), + }; + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const unsigned int size = sizeof(struct ibv_flow_spec_eth); struct ibv_flow_spec_eth eth = { - .type = parser->inner | IBV_FLOW_SPEC_ETH, - .size = eth_size, + .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - parser->layer = HASH_RXQ_ETH; + if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 layers already configured"); + if (!mask) + mask = &rte_flow_item_eth_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_eth), + error); + if (ret) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (size > flow_size) + return size; if (spec) { unsigned int i; - if (!mask) - mask = default_mask; memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN); memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN); eth.val.ether_type = spec->type; @@ -1459,112 +824,211 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, } eth.val.ether_type &= eth.mask.ether_type; } - mlx5_flow_create_copy(parser, &eth, eth_size); - return 0; + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + mlx5_flow_spec_verbs_add(flow, &eth, size); + return size; +} + +/** + * Update the VLAN tag in the Verbs Ethernet specification. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] eth + * Verbs structure containing the VLAN information to copy. + */ +static void +mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr, + struct ibv_flow_spec_eth *eth) +{ + unsigned int i; + const enum ibv_flow_spec_type search = eth->type; + struct ibv_spec_header *hdr = (struct ibv_spec_header *) + ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); + + for (i = 0; i != attr->num_of_specs; ++i) { + if (hdr->type == search) { + struct ibv_flow_spec_eth *e = + (struct ibv_flow_spec_eth *)hdr; + + e->val.vlan_tag = eth->val.vlan_tag; + e->mask.vlan_tag = eth->mask.vlan_tag; + e->val.ether_type = eth->val.ether_type; + e->mask.ether_type = eth->mask.ether_type; + break; + } + hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); + } } /** - * Convert VLAN item to Verbs specification.
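Each item and action converter above returns the number of bytes it needs and only writes into the flow when that amount fits in flow_size, so the same code path serves both validation and rule creation. A sketch of that calling convention with a hypothetical convert_item() helper, not the driver's API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical converter: needs 16 bytes; writes only when the caller
 * provided at least that much room, but always reports the requirement.
 */
static int
convert_item(void *buf, size_t buf_size)
{
	const size_t need = 16;

	if (need <= buf_size)
		memset(buf, 0xab, need);  /* fill the specification */
	return (int)need;
}

int
main(void)
{
	void *buf;
	int need;

	/* First pass: probe with no room, only the size is computed. */
	need = convert_item(NULL, 0);
	if (need < 0)
		return 1;
	buf = malloc(need);
	if (!buf)
		return 1;
	/* Second pass: the same call with enough room actually fills it. */
	convert_item(buf, (size_t)need);
	printf("spec takes %d bytes\n", need);
	free(buf);
	return 0;
}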
+ * Convert the @p item into @p flow (or by updating the already present + * Ethernet Verbs) specification after ensuring the NIC will understand and + * process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_vlan(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; - struct ibv_flow_spec_eth *eth; - const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); - const char *msg = "VLAN cannot be empty"; - + const struct rte_flow_item_vlan nic_mask = { + .tci = RTE_BE16(0x0fff), + .inner_type = RTE_BE16(0xffff), + }; + unsigned int size = sizeof(struct ibv_flow_spec_eth); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + struct ibv_flow_spec_eth eth = { + .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, + }; + int ret; + const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | + MLX5_FLOW_LAYER_INNER_L4) : + (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4); + const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + + if (flow->layers & vlanm) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN layer already configured"); + else if ((flow->layers & l34m) != 0) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 layer cannot follow L3/L4 layer"); + if (!mask) + mask = &rte_flow_item_vlan_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_vlan), error); + if (ret) + return ret; if (spec) { - unsigned int i; - if (!mask) - mask = default_mask; - - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - - eth = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset - eth_size); - eth->val.vlan_tag = spec->tci; - eth->mask.vlan_tag = mask->tci; - eth->val.vlan_tag &= eth->mask.vlan_tag; - /* - * From verbs perspective an empty VLAN is equivalent - * to a packet without VLAN layer. - */ - if (!eth->mask.vlan_tag) - goto error; - /* Outer TPID cannot be matched. 
*/ - if (eth->mask.ether_type) { - msg = "VLAN TPID matching is not supported"; - goto error; - } - eth->val.ether_type = spec->inner_type; - eth->mask.ether_type = mask->inner_type; - eth->val.ether_type &= eth->mask.ether_type; - } - return 0; + eth.val.vlan_tag = spec->tci; + eth.mask.vlan_tag = mask->tci; + eth.val.vlan_tag &= eth.mask.vlan_tag; + eth.val.ether_type = spec->inner_type; + eth.mask.ether_type = mask->inner_type; + eth.val.ether_type &= eth.mask.ether_type; } -error: - return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, - item, msg); + /* + * From verbs perspective an empty VLAN is equivalent + * to a packet without VLAN layer. + */ + if (!eth.mask.vlan_tag) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "VLAN cannot be empty"); + if (!(flow->layers & l2m)) { + if (size <= flow_size) { + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + mlx5_flow_spec_verbs_add(flow, &eth, size); + } + } else { + if (flow->cur_verbs) + mlx5_flow_item_vlan_update(flow->cur_verbs->attr, + &eth); + size = 0; /* Only an update is done in eth specification. */ + } + flow->layers |= tunnel ? + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN); + return size; } /** - * Convert IPv4 item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_ipv4(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { - struct priv *priv = data->dev->data->dev_private; const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; - unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext); + const struct rte_flow_item_ipv4 nic_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + }, + }; + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext); struct ibv_flow_spec_ipv4_ext ipv4 = { - .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT, - .size = ipv4_size, + .type = IBV_FLOW_SPEC_IPV4_EXT | + (tunnel ?
IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - if (parser->layer == HASH_RXQ_TUNNEL && - parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && - !priv->config.l3_vxlan_en) - return rte_flow_error_set(data->error, EINVAL, + if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple L3 layers not supported"); + else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "L3 VXLAN not enabled by device" - " parameter and/or not configured" - " in firmware"); - parser->layer = HASH_RXQ_IPV4; + "L3 cannot follow an L4 layer."); + if (!mask) + mask = &rte_flow_item_ipv4_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv4), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; if (spec) { - if (!mask) - mask = default_mask; ipv4.val = (struct ibv_flow_ipv4_ext_filter){ .src_ip = spec->hdr.src_addr, .dst_ip = spec->hdr.dst_addr, @@ -1583,55 +1047,108 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, ipv4.val.proto &= ipv4.mask.proto; ipv4.val.tos &= ipv4.mask.tos; } - mlx5_flow_create_copy(parser, &ipv4, ipv4_size); - return 0; + flow->l3_protocol_en = !!ipv4.mask.proto; + flow->l3_protocol = ipv4.val.proto; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust + (flow, tunnel, + (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER), + (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; + mlx5_flow_spec_verbs_add(flow, &ipv4, size); + } + return size; } /** - * Convert IPv6 item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. 
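mlx5_flow_verbs_hashfields_adjust() decides whether a given layer contributes to the RX hash: the layer's hash bits are kept only when the configured RSS types cover that layer and when the inner/outer choice matches the requested RSS level. A simplified model with rss_level 1 meaning outer and 2 meaning inner, all names invented for the sketch:

#include <stdint.h>
#include <stdio.h>

#define HASH_SRC_IPV4 (1u << 0)
#define HASH_DST_IPV4 (1u << 1)
#define HASH_INNER    (1u << 31)

/* Keep the layer's hash bits only when RSS actually targets this layer. */
static uint64_t
adjust_hash_fields(uint64_t fields, int tunnel_item, unsigned int rss_level,
		   uint64_t rss_types, uint64_t layer_types)
{
	if (tunnel_item)
		fields |= HASH_INNER;
	if (rss_level == 2 && !tunnel_item)
		return 0;      /* inner RSS requested, outer layer given */
	if (rss_level < 2 && tunnel_item)
		return 0;      /* outer RSS requested, inner layer given */
	if (!(rss_types & layer_types))
		return 0;      /* RSS does not cover this protocol */
	return fields;
}

int
main(void)
{
	uint64_t f = adjust_hash_fields(HASH_SRC_IPV4 | HASH_DST_IPV4,
					0 /* outer */, 1 /* outer RSS */,
					0x1 /* IPv4-like RSS type */, 0x1);

	printf("hash fields: 0x%llx\n", (unsigned long long)f);
	return 0;
}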
*/ static int -mlx5_flow_create_ipv6(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { - struct priv *priv = data->dev->data->dev_private; const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; - unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6); + const struct rte_flow_item_ipv6 nic_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + .hop_limits = 0xff, + }, + }; + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_ipv6); struct ibv_flow_spec_ipv6 ipv6 = { - .type = parser->inner | IBV_FLOW_SPEC_IPV6, - .size = ipv6_size, + .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - if (parser->layer == HASH_RXQ_TUNNEL && - parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && - !priv->config.l3_vxlan_en) - return rte_flow_error_set(data->error, EINVAL, + if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple L3 layers not supported"); + else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 cannot follow an L4 layer."); + /* + * IPv6 is not recognised by the NIC inside a GRE tunnel. + * Such support has to be disabled as the rule will be + * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and + * Mellanox OFED 4.4-1.0.0.0. + */ + if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE) + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "L3 VXLAN not enabled by device" - " parameter and/or not configured" - " in firmware"); - parser->layer = HASH_RXQ_IPV6; + "IPv6 inside a GRE tunnel is" + " not recognised."); + if (!mask) + mask = &rte_flow_item_ipv6_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv6), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; if (spec) { unsigned int i; uint32_t vtc_flow_val; uint32_t vtc_flow_mask; - if (!mask) - mask = default_mask; memcpy(&ipv6.val.src_ip, spec->hdr.src_addr, RTE_DIM(ipv6.val.src_ip)); memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr, @@ -1666,44 +1183,86 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, ipv6.val.next_hdr &= ipv6.mask.next_hdr; ipv6.val.hop_limit &= ipv6.mask.hop_limit; } - mlx5_flow_create_copy(parser, &ipv6, ipv6_size); - return 0; + flow->l3_protocol_en = !!ipv6.mask.next_hdr; + flow->l3_protocol = ipv6.val.next_hdr; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust + (flow, tunnel, + (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER), + (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3; + mlx5_flow_spec_verbs_add(flow, &ipv6, size); + } + return size; } /** - * Convert UDP item to Verbs specification. 
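The converters keep a bitmap of the layers already seen in the pattern and use it to enforce ordering: an L4 item needs an L3 item before it, and no layer may appear twice. A compact sketch of that bookkeeping with made-up LAYER_* flags standing in for the MLX5_FLOW_LAYER_* bits:

#include <stdint.h>
#include <stdio.h>

#define LAYER_L2 (1u << 0)
#define LAYER_L3 (1u << 1)
#define LAYER_L4 (1u << 2)

/* Return 0 when adding the layer respects eth -> ip -> tcp/udp ordering. */
static int
add_layer(uint32_t *layers, uint32_t new_layer)
{
	if (*layers & new_layer)
		return -1;                      /* layer already configured */
	if (new_layer == LAYER_L3 && (*layers & LAYER_L4))
		return -1;                      /* L3 cannot follow L4 */
	if (new_layer == LAYER_L4 && !(*layers & LAYER_L3))
		return -1;                      /* L3 is mandatory before L4 */
	*layers |= new_layer;
	return 0;
}

int
main(void)
{
	uint32_t layers = 0;

	printf("%d\n", add_layer(&layers, LAYER_L2)); /* 0 */
	printf("%d\n", add_layer(&layers, LAYER_L4)); /* -1, no L3 yet */
	printf("%d\n", add_layer(&layers, LAYER_L3)); /* 0 */
	printf("%d\n", add_layer(&layers, LAYER_L4)); /* 0 */
	return 0;
}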
+ * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_udp(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; - unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp udp = { - .type = parser->inner | IBV_FLOW_SPEC_UDP, - .size = udp_size, + .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_UDPV4; - else - parser->layer = HASH_RXQ_UDPV6; + if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "protocol filtering not compatible" + " with UDP layer"); + if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3))) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 is mandatory to filter" + " on L4"); + if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L4 layer is already" + " present"); + if (!mask) + mask = &rte_flow_item_udp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_udp_mask, + sizeof(struct rte_flow_item_udp), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; if (spec) { - if (!mask) - mask = default_mask; udp.val.dst_port = spec->hdr.dst_port; udp.val.src_port = spec->hdr.src_port; udp.mask.dst_port = mask->hdr.dst_port; @@ -1712,44 +1271,81 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, udp.val.src_port &= udp.mask.src_port; udp.val.dst_port &= udp.mask.dst_port; } - mlx5_flow_create_copy(parser, &udp, udp_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP, + (IBV_RX_HASH_SRC_PORT_UDP | + IBV_RX_HASH_DST_PORT_UDP)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; + mlx5_flow_spec_verbs_add(flow, &udp, size); + } + return size; } /** - * Convert TCP item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_tcp(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; - unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp tcp = { - .type = parser->inner | IBV_FLOW_SPEC_TCP, - .size = tcp_size, + .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), + .size = size, }; + int ret; - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_TCPV4; - else - parser->layer = HASH_RXQ_TCPV6; + if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "protocol filtering not compatible" + " with TCP layer"); + if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3))) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 is mandatory to filter on L4"); + if (flow->layers & (tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L4 layer is already present"); + if (!mask) + mask = &rte_flow_item_tcp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_tcp_mask, + sizeof(struct rte_flow_item_tcp), error); + if (ret < 0) + return ret; + flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; if (spec) { - if (!mask) - mask = default_mask; tcp.val.dst_port = spec->hdr.dst_port; tcp.val.src_port = spec->hdr.src_port; tcp.mask.dst_port = mask->hdr.dst_port; @@ -1758,49 +1354,78 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, tcp.val.src_port &= tcp.mask.src_port; tcp.val.dst_port &= tcp.mask.dst_port; } - mlx5_flow_create_copy(parser, &tcp, tcp_size); - return 0; + if (size <= flow_size) { + mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP, + (IBV_RX_HASH_SRC_PORT_TCP | + IBV_RX_HASH_DST_PORT_TCP)); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4; + mlx5_flow_spec_verbs_add(flow, &tcp, size); + } + return size; } /** - * Convert VXLAN item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. 
*/ static int -mlx5_flow_create_vxlan(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow, + const size_t flow_size, struct rte_flow_error *error) { const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; unsigned int size = sizeof(struct ibv_flow_spec_tunnel); struct ibv_flow_spec_tunnel vxlan = { - .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, .size = size, }; + int ret; union vni { uint32_t vlan_id; uint8_t vni[4]; - } id; + } id = { .vlan_id = 0, }; - id.vni[0] = 0; - parser->inner = IBV_FLOW_SPEC_INNER; - parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)]; - parser->out_layer = parser->layer; - parser->layer = HASH_RXQ_TUNNEL; + if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "a tunnel is already present"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_mask, + sizeof(struct rte_flow_item_vxlan), error); + if (ret < 0) + return ret; if (spec) { - if (!mask) - mask = default_mask; memcpy(&id.vni[1], spec->vni, 3); vxlan.val.tunnel_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); @@ -1809,148 +1434,272 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; } /* - * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this - * layer is defined in the Verbs specification it is interpreted as - * wildcard and all packets will match this rule, if it follows a full - * stack layer (ex: eth / ipv4 / udp), all packets matching the layers - * before will also match this rule. - * To avoid such situation, VNI 0 is currently refused. + * Tunnel id 0 is equivalent as not adding a VXLAN layer, if + * only this layer is defined in the Verbs specification it is + * interpreted as wildcard and all packets will match this + * rule, if it follows a full stack layer (ex: eth / ipv4 / + * udp), all packets matching the layers before will also + * match this rule. To avoid such situation, VNI 0 is + * currently refused. */ - /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */ - if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id) - return rte_flow_error_set(data->error, EINVAL, + if (!vxlan.val.tunnel_id) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "VxLAN vni cannot be 0"); - mlx5_flow_create_copy(parser, &vxlan, size); - return 0; + "VXLAN vni cannot be 0"); + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN tunnel must be fully defined"); + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &vxlan, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_VXLAN; + return size; } /** - * Convert VXLAN-GPE item to Verbs specification. 
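The VXLAN converter above packs the 24-bit VNI from the pattern into the 32-bit Verbs tunnel_id by copying it into bytes 1 to 3 of a union, leaving byte 0 zero, and refuses VNI 0 because an all-zero tunnel id would act as a wildcard. A small standalone sketch of that packing; pack_vni() is illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 3-byte VNI into a 32-bit tunnel id, byte 0 staying zero. */
static uint32_t
pack_vni(const uint8_t vni[3])
{
	union {
		uint32_t tunnel_id;
		uint8_t vni[4];
	} id = { .tunnel_id = 0 };

	memcpy(&id.vni[1], vni, 3);
	return id.tunnel_id;
}

int
main(void)
{
	const uint8_t vni[3] = { 0x12, 0x34, 0x56 };
	uint32_t tunnel_id = pack_vni(vni);

	if (!tunnel_id) {
		/* A zero VNI would match every packet, so it is rejected. */
		fprintf(stderr, "VNI cannot be 0\n");
		return 1;
	}
	printf("tunnel_id: 0x%08x\n", tunnel_id);
	return 0;
}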
+ * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param dev + * Pointer to Ethernet device. + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) { - struct priv *priv = data->dev->data->dev_private; const struct rte_flow_item_vxlan_gpe *spec = item->spec; const struct rte_flow_item_vxlan_gpe *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; unsigned int size = sizeof(struct ibv_flow_spec_tunnel); - struct ibv_flow_spec_tunnel vxlan = { - .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, + struct ibv_flow_spec_tunnel vxlan_gpe = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, .size = size, }; + int ret; union vni { uint32_t vlan_id; uint8_t vni[4]; - } id; + } id = { .vlan_id = 0, }; - if (!priv->config.l3_vxlan_en) - return rte_flow_error_set(data->error, EINVAL, + if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en) + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "L3 VXLAN not enabled by device" - " parameter and/or not configured" - " in firmware"); - id.vni[0] = 0; - parser->inner = IBV_FLOW_SPEC_INNER; - parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)]; - parser->out_layer = parser->layer; - parser->layer = HASH_RXQ_TUNNEL; + "L3 VXLAN is not enabled by device" + " parameter and/or not configured in" + " firmware"); + if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "a tunnel is already present"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_gpe_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, + sizeof(struct rte_flow_item_vxlan_gpe), error); + if (ret < 0) + return ret; if (spec) { - if (!mask) - mask = default_mask; memcpy(&id.vni[1], spec->vni, 3); - vxlan.val.tunnel_id = id.vlan_id; + vxlan_gpe.val.tunnel_id = id.vlan_id; memcpy(&id.vni[1], mask->vni, 3); - vxlan.mask.tunnel_id = id.vlan_id; + vxlan_gpe.mask.tunnel_id = id.vlan_id; if 
(spec->protocol) - return rte_flow_error_set(data->error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "VxLAN-GPE protocol not" - " supported"); + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN-GPE protocol not supported"); /* Remove unwanted bits from values. */ - vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; + vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id; } /* * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this * layer is defined in the Verbs specification it is interpreted as * wildcard and all packets will match this rule, if it follows a full * stack layer (ex: eth / ipv4 / udp), all packets matching the layers - * before will also match this rule. - * To avoid such situation, VNI 0 is currently refused. + * before will also match this rule. To avoid such situation, VNI 0 + * is currently refused. */ - /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */ - if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id) - return rte_flow_error_set(data->error, EINVAL, + if (!vxlan_gpe.val.tunnel_id) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, - "VxLAN-GPE vni cannot be 0"); - mlx5_flow_create_copy(parser, &vxlan, size); - return 0; + "VXLAN-GPE vni cannot be 0"); + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN-GPE tunnel must be fully" + " defined"); + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE; + return size; +} + +/** + * Update the protocol in Verbs IPv4/IPv6 spec. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] search + * Specification type to search in order to update the IP protocol. + * @param[in] protocol + * Protocol value to set if none is present in the specification. + */ +static void +mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr, + enum ibv_flow_spec_type search, + uint8_t protocol) +{ + unsigned int i; + struct ibv_spec_header *hdr = (struct ibv_spec_header *) + ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); + + if (!attr) + return; + for (i = 0; i != attr->num_of_specs; ++i) { + if (hdr->type == search) { + union { + struct ibv_flow_spec_ipv4_ext *ipv4; + struct ibv_flow_spec_ipv6 *ipv6; + } ip; + + switch (search) { + case IBV_FLOW_SPEC_IPV4_EXT: + ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr; + if (!ip.ipv4->val.proto) { + ip.ipv4->val.proto = protocol; + ip.ipv4->mask.proto = 0xff; + } + break; + case IBV_FLOW_SPEC_IPV6: + ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr; + if (!ip.ipv6->val.next_hdr) { + ip.ipv6->val.next_hdr = protocol; + ip.ipv6->mask.next_hdr = 0xff; + } + break; + default: + break; + } + break; + } + hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); + } } /** - * Convert GRE item to Verbs specification. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * It will also update the previous L3 layer with the protocol value matching + * the GRE. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param dev + * Pointer to Ethernet device. + * @param[in] item * Item specification. 
- * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. */ static int -mlx5_flow_create_gre(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_gre(const struct rte_flow_item *item, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) { - struct mlx5_flow_parse *parser = data->parser; -#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT - (void)default_mask; - unsigned int size = sizeof(struct ibv_flow_spec_tunnel); - struct ibv_flow_spec_tunnel tunnel = { - .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, - .size = size, - }; -#else + struct mlx5_flow_verbs *verbs = flow->cur_verbs; const struct rte_flow_item_gre *spec = item->spec; const struct rte_flow_item_gre *mask = item->mask; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT unsigned int size = sizeof(struct ibv_flow_spec_gre); struct ibv_flow_spec_gre tunnel = { - .type = parser->inner | IBV_FLOW_SPEC_GRE, + .type = IBV_FLOW_SPEC_GRE, + .size = size, + }; +#else + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel tunnel = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, .size = size, }; #endif - struct ibv_flow_spec_ipv4_ext *ipv4; - struct ibv_flow_spec_ipv6 *ipv6; - unsigned int i; + int ret; - parser->inner = IBV_FLOW_SPEC_INNER; - parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)]; - parser->out_layer = parser->layer; - parser->layer = HASH_RXQ_TUNNEL; + if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "protocol filtering not compatible" + " with this GRE layer"); + if (flow->layers & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "a tunnel is already present"); + if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 Layer is missing"); + if (!mask) + mask = &rte_flow_item_gre_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_gre_mask, + sizeof(struct rte_flow_item_gre), error); + if (ret < 0) + return ret; #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT if (spec) { - if (!mask) - mask = default_mask; tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver; tunnel.val.protocol = spec->protocol; tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver; @@ -1960,480 +1709,1288 @@ mlx5_flow_create_gre(const struct rte_flow_item *item, tunnel.val.protocol &= tunnel.mask.protocol; tunnel.val.key &= tunnel.mask.key; } -#endif - /* Update encapsulation IP layer protocol. 
*/ - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) - continue; - if (parser->out_layer == HASH_RXQ_IPV4) { - ipv4 = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset - - sizeof(struct ibv_flow_spec_ipv4_ext)); - if (ipv4->mask.proto && ipv4->val.proto != MLX5_GRE) - break; - ipv4->val.proto = MLX5_GRE; - ipv4->mask.proto = 0xff; - } else if (parser->out_layer == HASH_RXQ_IPV6) { - ipv6 = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset - - sizeof(struct ibv_flow_spec_ipv6)); - if (ipv6->mask.next_hdr && - ipv6->val.next_hdr != MLX5_GRE) - break; - ipv6->val.next_hdr = MLX5_GRE; - ipv6->mask.next_hdr = 0xff; - } - } - if (i != hash_rxq_init_n) - return rte_flow_error_set(data->error, EINVAL, +#else + if (spec && (spec->protocol & mask->protocol)) + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, - "IP protocol of GRE must be 47"); - mlx5_flow_create_copy(parser, &tunnel, size); - return 0; + "without MPLS support the" + " specification cannot be used for" + " filtering"); +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */ + if (size <= flow_size) { + if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) + mlx5_flow_item_gre_ip_protocol_update + (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT, + MLX5_IP_PROTOCOL_GRE); + else + mlx5_flow_item_gre_ip_protocol_update + (verbs->attr, IBV_FLOW_SPEC_IPV6, + MLX5_IP_PROTOCOL_GRE); + mlx5_flow_spec_verbs_add(flow, &tunnel, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_GRE; + return size; } /** - * Convert MPLS item to Verbs specification. - * MPLS tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP. + * Convert the @p item into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. * - * @param item[in] + * @param[in] item * Item specification. - * @param default_mask[in] - * Default bit-masks to use when item->mask is not provided. - * @param data[in, out] - * User structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p item has fully been converted, + * otherwise another call with this returned memory size should be done. + * On error, a negative errno value is returned and rte_errno is set. 
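Both mlx5_flow_item_vlan_update() and mlx5_flow_item_gre_ip_protocol_update() rely on Verbs specifications being laid out back to back after the attribute header, each starting with a {type, size} pair, so a spec of a given type can be found by hopping from header to header. A generic standalone sketch of that traversal, with spec_header as an illustrative stand-in for struct ibv_spec_header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct ibv_spec_header: every spec starts with this. */
struct spec_header {
	uint16_t type;
	uint16_t size;   /* total size of the spec, header included */
};

/* Return the first spec of @type among @n specs packed in @buf. */
static struct spec_header *
find_spec(uint8_t *buf, unsigned int n, uint16_t type)
{
	struct spec_header *hdr = (struct spec_header *)buf;
	unsigned int i;

	for (i = 0; i != n; ++i) {
		if (hdr->type == type)
			return hdr;
		hdr = (struct spec_header *)((uint8_t *)hdr + hdr->size);
	}
	return NULL;
}

int
main(void)
{
	_Alignas(4) uint8_t buf[64];
	struct spec_header eth = { .type = 1, .size = 8 };
	struct spec_header ipv4 = { .type = 2, .size = 16 };

	memcpy(buf, &eth, sizeof(eth));
	memcpy(buf + eth.size, &ipv4, sizeof(ipv4));
	printf("found type %u\n", find_spec(buf, 2, 2)->type);
	return 0;
}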
*/ static int -mlx5_flow_create_mpls(const struct rte_flow_item *item, - const void *default_mask, - struct mlx5_flow_data *data) +mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused, + struct rte_flow *flow __rte_unused, + const size_t flow_size __rte_unused, + struct rte_flow_error *error) { -#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT - (void)default_mask; - return rte_flow_error_set(data->error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "MPLS is not supported by driver"); -#else +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT const struct rte_flow_item_mpls *spec = item->spec; const struct rte_flow_item_mpls *mask = item->mask; - struct mlx5_flow_parse *parser = data->parser; unsigned int size = sizeof(struct ibv_flow_spec_mpls); struct ibv_flow_spec_mpls mpls = { .type = IBV_FLOW_SPEC_MPLS, .size = size, }; + int ret; - parser->inner = IBV_FLOW_SPEC_INNER; - if (parser->layer == HASH_RXQ_UDPV4 || - parser->layer == HASH_RXQ_UDPV6) { - parser->tunnel = - ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)]; - parser->out_layer = parser->layer; - } else { - parser->tunnel = - ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)]; - /* parser->out_layer stays as in GRE out_layer. */ - } - parser->layer = HASH_RXQ_TUNNEL; + if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "protocol filtering not compatible" + " with MPLS layer"); + /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */ + if (flow->layers & MLX5_FLOW_LAYER_TUNNEL && + (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "a tunnel is already" + " present"); + if (!mask) + mask = &rte_flow_item_mpls_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_mpls_mask, + sizeof(struct rte_flow_item_mpls), error); + if (ret < 0) + return ret; if (spec) { - if (!mask) - mask = default_mask; - /* - * The verbs label field includes the entire MPLS header: - * bits 0:19 - label value field. - * bits 20:22 - traffic class field. - * bits 23 - bottom of stack bit. - * bits 24:31 - ttl field. - */ - mpls.val.label = *(const uint32_t *)spec; - mpls.mask.label = *(const uint32_t *)mask; - /* Remove unwanted bits from values. */ + memcpy(&mpls.val.label, spec, sizeof(mpls.val.label)); + memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label)); + /* Remove unwanted bits from values. */ mpls.val.label &= mpls.mask.label; } - mlx5_flow_create_copy(parser, &mpls, size); - return 0; -#endif + if (size <= flow_size) { + mlx5_flow_spec_verbs_add(flow, &mpls, size); + flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2; + } + flow->layers |= MLX5_FLOW_LAYER_MPLS; + return size; +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "MPLS is not supported by Verbs, please" + " update."); } /** - * Convert mark/flag action to Verbs specification. + * Convert the @p pattern into a Verbs specifications after ensuring the NIC + * will understand and process it correctly. + * The conversion is performed item per item, each of them is written into + * the @p flow if its size is lesser or equal to @p flow_size. + * Validation and memory consumption computation are still performed until the + * end of @p pattern, unless an error is encountered. * - * @param parser - * Internal parser structure. - * @param mark_id - * Mark identifier. 
+ * @param[in] pattern + * Flow pattern. + * @param[in, out] flow + * Pointer to the rte_flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @pattern has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. + */ +static int +mlx5_flow_items(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + int remain = flow_size; + size_t size = 0; + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + int ret = 0; + + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + ret = mlx5_flow_item_eth(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + ret = mlx5_flow_item_vlan(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = mlx5_flow_item_ipv4(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ret = mlx5_flow_item_ipv6(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + ret = mlx5_flow_item_udp(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_item_tcp(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + ret = mlx5_flow_item_vxlan(pattern, flow, remain, + error); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow, + remain, error); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + ret = mlx5_flow_item_gre(pattern, flow, remain, error); + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + ret = mlx5_flow_item_mpls(pattern, flow, remain, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "item not supported"); + } + if (ret < 0) + return ret; + if (remain > ret) + remain -= ret; + else + remain = 0; + size += ret; + } + if (!flow->layers) { + const struct rte_flow_item item = { + .type = RTE_FLOW_ITEM_TYPE_ETH, + }; + + return mlx5_flow_item_eth(&item, flow, flow_size, error); + } + return size; +} + +/** + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
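The return convention described above is what makes single-allocation flows possible: one call with a zero-sized buffer validates the rule and reports its footprint, a second call fills a buffer of exactly that size. A sketch of that calling convention as it would look inside mlx5_flow.c, mirroring what mlx5_flow_list_create() does further below; error handling is trimmed and the example_two_pass name is illustrative:

#include <rte_malloc.h>

static struct rte_flow *
example_two_pass(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct rte_flow *flow;
        /* Pass 1: nothing is written, only the required size is returned. */
        int need = mlx5_flow_merge(dev, NULL, 0, attr, pattern, actions, error);

        if (need < 0)
                return NULL;
        /* Pass 2: allocate exactly that much and convert for real. */
        flow = rte_calloc(__func__, 1, need, 0);
        if (!flow)
                return NULL;
        mlx5_flow_merge(dev, flow, need, attr, pattern, actions, error);
        return flow;
}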
+ */ +static int +mlx5_flow_action_drop(const struct rte_flow_action *action, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + unsigned int size = sizeof(struct ibv_flow_spec_action_drop); + struct ibv_flow_spec_action_drop drop = { + .type = IBV_FLOW_SPEC_ACTION_DROP, + .size = size, + }; + + if (flow->fate) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "multiple fate actions are not" + " supported"); + if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "drop is not compatible with" + " flag/mark action"); + if (size < flow_size) + mlx5_flow_spec_verbs_add(flow, &drop, size); + flow->fate |= MLX5_FLOW_FATE_DROP; + return size; +} + +/** + * Convert the @p action into @p flow after ensuring the NIC will understand + * and process it correctly. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * @param[out] error + * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) +mlx5_flow_action_queue(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_flow_action_queue *queue = action->conf; + + if (flow->fate) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "multiple fate actions are not" + " supported"); + if (queue->index >= priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &queue->index, + "queue index out of range"); + if (!(*priv->rxqs)[queue->index]) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &queue->index, + "queue is not configured"); + if (flow->queue) + (*flow->queue)[0] = queue->index; + flow->rss.queue_num = 1; + flow->fate |= MLX5_FLOW_FATE_QUEUE; + return 0; +} + +/** + * Ensure the @p action will be understood and used correctly by the NIC. + * + * @param dev + * Pointer to Ethernet device structure. + * @param action[in] + * Pointer to flow actions array. + * @param flow[in, out] + * Pointer to the rte_flow structure. + * @param error[in, out] + * Pointer to error structure. + * + * @return + * On success @p flow->queue array and @p flow->rss are filled and valid. + * On error, a negative errno value is returned and rte_errno is set. 
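Seen from the application, the queue fate action validated above is the simplest way to steer matched traffic. A minimal rule built on the public rte_flow structures; the steer_* names, port and queue numbers are placeholders:

#include <rte_flow.h>

static const struct rte_flow_attr steer_attr = { .ingress = 1 };
static const struct rte_flow_action_queue steer_queue = { .index = 3 };
static const struct rte_flow_item steer_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action steer_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &steer_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
/* struct rte_flow *f = rte_flow_create(port_id, &steer_attr, steer_pattern,
 *                                      steer_actions, &error); */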
+ */ +static int +mlx5_flow_action_rss(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_flow_action_rss *rss = action->conf; + unsigned int i; + + if (flow->fate) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "multiple fate actions are not" + " supported"); + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->func, + "RSS hash function not supported"); +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (rss->level > 2) +#else + if (rss->level > 1) +#endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->level, + "tunnel RSS is not supported"); + if (rss->key_len < MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too small"); + if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too large"); + if (!rss->queue_num) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + rss, + "no queues were provided for RSS"); + if (rss->queue_num > priv->config.ind_table_max_size) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue_num, + "number of queues too large"); + if (rss->types & MLX5_RSS_HF_MASK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->types, + "some RSS protocols are not" + " supported"); + for (i = 0; i != rss->queue_num; ++i) { + if (rss->queue[i] >= priv->rxqs_n) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + rss, + "queue index out of range"); + if (!(*priv->rxqs)[rss->queue[i]]) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], + "queue is not configured"); + } + if (flow->queue) + memcpy((*flow->queue), rss->queue, + rss->queue_num * sizeof(uint16_t)); + flow->rss.queue_num = rss->queue_num; + memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN); + flow->rss.types = rss->types; + flow->rss.level = rss->level; + flow->fate |= MLX5_FLOW_FATE_RSS; + return 0; +} + +/** + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
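An RSS action that passes all of the checks above uses the default (or Toeplitz) hash function, a key of exactly MLX5_RSS_HASH_KEY_LEN bytes (40 in this PMD), outer-only hashing and an in-range queue list. An illustrative filling of struct rte_flow_action_rss; key bytes and queue numbers are placeholders:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint8_t example_rss_key[40] = {
        0x2c, 0xc6, 0x81, 0x2e, /* remaining bytes left at zero for brevity */
};
static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 1,                        /* outer headers only */
        .types = ETH_RSS_IP | ETH_RSS_UDP,
        .key_len = sizeof(example_rss_key),
        .queue_num = RTE_DIM(example_rss_queues),
        .key = example_rss_key,
        .queue = example_rss_queues,
};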
+ */ +static int +mlx5_flow_action_flag(const struct rte_flow_action *action, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) { unsigned int size = sizeof(struct ibv_flow_spec_action_tag); struct ibv_flow_spec_action_tag tag = { .type = IBV_FLOW_SPEC_ACTION_TAG, .size = size, - .tag_id = mlx5_flow_mark_set(mark_id), + .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT), }; + struct mlx5_flow_verbs *verbs = flow->cur_verbs; + + if (flow->modifier & MLX5_FLOW_MOD_FLAG) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "flag action already present"); + if (flow->fate & MLX5_FLOW_FATE_DROP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "flag is not compatible with drop" + " action"); + if (flow->modifier & MLX5_FLOW_MOD_MARK) + size = 0; + else if (size <= flow_size && verbs) + mlx5_flow_spec_verbs_add(flow, &tag, size); + flow->modifier |= MLX5_FLOW_MOD_FLAG; + return size; +} - assert(parser->mark); - mlx5_flow_create_copy(parser, &tag, size); - return 0; +/** + * Update verbs specification to modify the flag to mark. + * + * @param[in, out] verbs + * Pointer to the mlx5_flow_verbs structure. + * @param[in] mark_id + * Mark identifier to replace the flag. + */ +static void +mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id) +{ + struct ibv_spec_header *hdr; + int i; + + if (!verbs) + return; + /* Update Verbs specification. */ + hdr = (struct ibv_spec_header *)verbs->specs; + if (!hdr) + return; + for (i = 0; i != verbs->attr->num_of_specs; ++i) { + if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) { + struct ibv_flow_spec_action_tag *t = + (struct ibv_flow_spec_action_tag *)hdr; + + t->tag_id = mlx5_flow_mark_set(mark_id); + } + hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size); + } } /** - * Convert count action to Verbs specification. + * Convert the @p action into @p flow (or by updating the already present + * Flag Verbs specification) after ensuring the NIC will understand and + * process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param[in] action + * Action configuration. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param[out] error + * Pointer to error structure. * - * @param dev - * Pointer to Ethernet device. - * @param parser - * Pointer to MLX5 flow parser structure. + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
+ */ +static int +mlx5_flow_action_mark(const struct rte_flow_action *action, + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + const struct rte_flow_action_mark *mark = action->conf; + unsigned int size = sizeof(struct ibv_flow_spec_action_tag); + struct ibv_flow_spec_action_tag tag = { + .type = IBV_FLOW_SPEC_ACTION_TAG, + .size = size, + }; + struct mlx5_flow_verbs *verbs = flow->cur_verbs; + + if (!mark) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "configuration cannot be null"); + if (mark->id >= MLX5_FLOW_MARK_MAX) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &mark->id, + "mark id must be in 0 <= id < " + RTE_STR(MLX5_FLOW_MARK_MAX)); + if (flow->modifier & MLX5_FLOW_MOD_MARK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "mark action already present"); + if (flow->fate & MLX5_FLOW_FATE_DROP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "mark is not compatible with drop" + " action"); + if (flow->modifier & MLX5_FLOW_MOD_FLAG) { + mlx5_flow_verbs_mark_update(verbs, mark->id); + size = 0; + } else if (size <= flow_size) { + tag.tag_id = mlx5_flow_mark_set(mark->id); + mlx5_flow_spec_verbs_add(flow, &tag, size); + } + flow->modifier |= MLX5_FLOW_MOD_MARK; + return size; +} + +/** + * Convert the @p action into a Verbs specification after ensuring the NIC + * will understand and process it correctly. + * If the necessary size for the conversion is greater than the @p flow_size, + * nothing is written in @p flow, the validation is still performed. + * + * @param action[in] + * Action configuration. + * @param flow[in, out] + * Pointer to flow structure. + * @param flow_size[in] + * Size in bytes of the available space in @p flow, if too small, nothing is + * written. + * @param error[in, out] + * Pointer to error structure. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p action has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set.
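On the receive path, the mark and flag modifiers handled above become visible through the mbuf: a mark value is reported in hash.fdir.hi together with PKT_RX_FDIR_ID, while a bare flag only raises PKT_RX_FDIR. A small reader sketch; the function name and the "no mark" sentinel are illustrative:

#include <stdint.h>
#include <rte_mbuf.h>

static inline uint32_t
example_read_mark(const struct rte_mbuf *m)
{
        if (m->ol_flags & PKT_RX_FDIR_ID)
                return m->hash.fdir.hi; /* value given to the MARK action */
        if (m->ol_flags & PKT_RX_FDIR)
                return 0;               /* FLAG action: match reported, no id */
        return UINT32_MAX;              /* no marking rule matched this packet */
}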
*/ static int -mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, - struct mlx5_flow_parse *parser __rte_unused) +mlx5_flow_action_count(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct rte_flow *flow, + const size_t flow_size __rte_unused, + struct rte_flow_error *error) { + const struct rte_flow_action_count *count = action->conf; #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT - struct priv *priv = dev->data->dev_private; unsigned int size = sizeof(struct ibv_flow_spec_counter_action); - struct ibv_counter_set_init_attr init_attr = {0}; struct ibv_flow_spec_counter_action counter = { .type = IBV_FLOW_SPEC_ACTION_COUNT, .size = size, - .counter_set_handle = 0, }; +#endif - init_attr.counter_set_id = 0; - parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr); - if (!parser->cs) { - rte_errno = EINVAL; - return -rte_errno; - } - counter.counter_set_handle = parser->cs->handle; - mlx5_flow_create_copy(parser, &counter, size); + if (!flow->counter) { + flow->counter = mlx5_flow_counter_new(dev, count->shared, + count->id); + if (!flow->counter) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "cannot get counter" + " context."); + } + if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "flow counters are not supported."); + flow->modifier |= MLX5_FLOW_MOD_COUNT; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + counter.counter_set_handle = flow->counter->cs->handle; + if (size <= flow_size) + mlx5_flow_spec_verbs_add(flow, &counter, size); + return size; #endif return 0; } /** - * Complete flow rule creation with a drop queue. + * Convert the @p action into @p flow after ensuring the NIC will understand + * and process it correctly. + * The conversion is performed action per action, each of them is written into + * the @p flow if its size is lesser or equal to @p flow_size. + * Validation and memory consumption computation are still performed until the + * end of @p action, unless an error is encountered. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in] actions + * Pointer to flow actions array. + * @param[in, out] flow + * Pointer to the rte_flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[out] error + * Pointer to error structure. + * + * @return + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the @p actions has fully been + * converted, otherwise another call with this returned memory size should + * be done. + * On error, a negative errno value is returned and rte_errno is set. 
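A counter action exercising the shared path above can be written as follows; a counter is not a fate action, so it is combined with a queue action here. The identifiers and the numeric values are illustrative:

#include <rte_flow.h>

static const struct rte_flow_action_count shared_count = {
        .shared = 1, /* rules with the same (shared, id) pair share one counter */
        .id = 42,
};
static const struct rte_flow_action_queue count_queue = { .index = 0 };
static const struct rte_flow_action count_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &shared_count },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &count_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};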
+ */ +static int +mlx5_flow_actions(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct rte_flow *flow, const size_t flow_size, + struct rte_flow_error *error) +{ + size_t size = 0; + int remain = flow_size; + int ret = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + ret = mlx5_flow_action_flag(actions, flow, remain, + error); + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = mlx5_flow_action_mark(actions, flow, remain, + error); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_action_drop(actions, flow, remain, + error); + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_action_queue(dev, actions, flow, error); + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = mlx5_flow_action_rss(dev, actions, flow, error); + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = mlx5_flow_action_count(dev, actions, flow, remain, + error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + if (ret < 0) + return ret; + if (remain > ret) + remain -= ret; + else + remain = 0; + size += ret; + } + if (!flow->fate) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "no fate action found"); + return size; +} + +/** + * Validate flow rule and fill flow structure accordingly. * * @param dev * Pointer to Ethernet device. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. + * @param[out] flow + * Pointer to flow structure. + * @param flow_size + * Size of allocated space for @p flow. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). * @param[out] error * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * A positive value representing the size of the flow object in bytes + * regardless of @p flow_size on success, a negative errno value otherwise + * and rte_errno is set. 
*/ static int -mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +mlx5_flow_merge_switch(struct rte_eth_dev *dev, + struct rte_flow *flow, + size_t flow_size, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - struct ibv_flow_spec_action_drop *drop; - unsigned int size = sizeof(struct ibv_flow_spec_action_drop); + unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0); + uint16_t port_id[!n + n]; + struct mlx5_nl_flow_ptoi ptoi[!n + n + 1]; + size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t)); + unsigned int i; + unsigned int own = 0; + int ret; - assert(priv->pd); - assert(priv->ctx); - flow->drop = 1; - drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr + - parser->queue[HASH_RXQ_ETH].offset); - *drop = (struct ibv_flow_spec_action_drop){ - .type = IBV_FLOW_SPEC_ACTION_DROP, - .size = size, - }; - ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs; - parser->queue[HASH_RXQ_ETH].offset += size; - flow->frxq[HASH_RXQ_ETH].ibv_attr = - parser->queue[HASH_RXQ_ETH].ibv_attr; - if (parser->count) - flow->cs = parser->cs; - if (!dev->data->dev_started) - return 0; - parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; - flow->frxq[HASH_RXQ_ETH].ibv_flow = - mlx5_glue->create_flow(priv->flow_drop_queue->qp, - flow->frxq[HASH_RXQ_ETH].ibv_attr); - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { - rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "flow rule creation failure"); - goto error; - } - return 0; -error: - assert(flow); - if (flow->frxq[HASH_RXQ_ETH].ibv_flow) { - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; - } - if (flow->frxq[HASH_RXQ_ETH].ibv_attr) { - rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr); - flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL; + /* At least one port is needed when no switch domain is present. */ + if (!n) { + n = 1; + port_id[0] = dev->data->port_id; + } else { + n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n); + } + for (i = 0; i != n; ++i) { + struct rte_eth_dev_info dev_info; + + rte_eth_dev_info_get(port_id[i], &dev_info); + if (port_id[i] == dev->data->port_id) + own = i; + ptoi[i].port_id = port_id[i]; + ptoi[i].ifindex = dev_info.if_index; + } + /* Ensure first entry of ptoi[] is the current device. */ + if (own) { + ptoi[n] = ptoi[0]; + ptoi[0] = ptoi[own]; + ptoi[own] = ptoi[n]; + } + /* An entry with zero ifindex terminates ptoi[]. */ + ptoi[n].port_id = 0; + ptoi[n].ifindex = 0; + if (flow_size < off) + flow_size = 0; + ret = mlx5_nl_flow_transpose((uint8_t *)flow + off, + flow_size ? flow_size - off : 0, + ptoi, attr, pattern, actions, error); + if (ret < 0) + return ret; + if (flow_size) { + *flow = (struct rte_flow){ + .attributes = *attr, + .nl_flow = (uint8_t *)flow + off, + }; + /* + * Generate a reasonably unique handle based on the address + * of the target buffer. + * + * This is straightforward on 32-bit systems where the flow + * pointer can be used directly. Otherwise, its least + * significant part is taken after shifting it by the + * previous power of two of the pointed buffer size. 
+ */ + if (sizeof(flow) <= 4) + mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow); + else + mlx5_nl_flow_brand + (flow->nl_flow, + (uintptr_t)flow >> + rte_log2_u32(rte_align32prevpow2(flow_size))); } - if (flow->cs) { - claim_zero(mlx5_glue->destroy_counter_set(flow->cs)); - flow->cs = NULL; - parser->cs = NULL; + return off + ret; +} + +static unsigned int +mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +{ + const struct rte_flow_item *item; + unsigned int has_vlan = 0; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + has_vlan = 1; + break; + } } - return -rte_errno; + if (has_vlan) + return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; + return rss_level < 2 ? MLX5_EXPANSION_ROOT : + MLX5_EXPANSION_ROOT_OUTER; } /** - * Create hash Rx queues when RSS is enabled. - * - * @param dev + * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC + * after ensuring the NIC will understand and process it correctly. + * The conversion is only performed item/action per item/action, each of + * them is written into the @p flow if its size is lesser or equal to @p + * flow_size. + * Validation and memory consumption computation are still performed until the + * end, unless an error is encountered. + * + * @param[in] dev * Pointer to Ethernet device. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. + * @param[in, out] flow + * Pointer to flow structure. + * @param[in] flow_size + * Size in bytes of the available space in @p flow, if too small some + * garbage may be present. + * @param[in] attributes + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). * @param[out] error * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. + * On success the number of bytes consumed/necessary, if the returned value + * is lesser or equal to @p flow_size, the flow has fully been converted and + * can be applied, otherwise another call with this returned memory size + * should be done. + * On error, a negative errno value is returned and rte_errno is set. 
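A worked instance of the handle derivation above, assuming a 64-bit build: with flow_size = 400, rte_align32prevpow2(400) is 256 and rte_log2_u32(256) is 8, so the handle is (uintptr_t)flow >> 8; two non-overlapping buffers of that size can never collapse to the same shifted value before the final truncation. The helper name below is illustrative:

#include <stdint.h>
#include <rte_common.h>

static inline uint32_t
example_flow_handle(const void *flow, uint32_t flow_size)
{
        if (sizeof(void *) <= 4) /* 32-bit: the pointer itself is enough */
                return (uintptr_t)flow;
        return (uintptr_t)flow >>
               rte_log2_u32(rte_align32prevpow2(flow_size));
}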
*/ static int -mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow, + const size_t flow_size, + const struct rte_flow_attr *attributes, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow local_flow = { .layers = 0, }; + size_t size = sizeof(*flow); + union { + struct rte_flow_expand_rss buf; + uint8_t buffer[2048]; + } expand_buffer; + struct rte_flow_expand_rss *buf = &expand_buffer.buf; + struct mlx5_flow_verbs *original_verbs = NULL; + size_t original_verbs_size = 0; + uint32_t original_layers = 0; + int expanded_pattern_idx = 0; + int ret; + uint32_t i; + + if (attributes->transfer) + return mlx5_flow_merge_switch(dev, flow, flow_size, + attributes, pattern, + actions, error); + if (size > flow_size) + flow = &local_flow; + ret = mlx5_flow_attributes(dev, attributes, flow, error); + if (ret < 0) + return ret; + ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error); + if (ret < 0) + return ret; + if (local_flow.rss.types) { + unsigned int graph_root; + + graph_root = mlx5_find_graph_root(pattern, + local_flow.rss.level); + ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), + pattern, local_flow.rss.types, + mlx5_support_expansion, + graph_root); + assert(ret > 0 && + (unsigned int)ret < sizeof(expand_buffer.buffer)); + } else { + buf->entries = 1; + buf->entry[0].pattern = (void *)(uintptr_t)pattern; + } + size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t), + sizeof(void *)); + if (size <= flow_size) + flow->queue = (void *)(flow + 1); + LIST_INIT(&flow->verbs); + flow->layers = 0; + flow->modifier = 0; + flow->fate = 0; + for (i = 0; i != buf->entries; ++i) { + size_t off = size; + size_t off2; + + flow->layers = original_layers; + size += sizeof(struct ibv_flow_attr) + + sizeof(struct mlx5_flow_verbs); + off2 = size; + if (size < flow_size) { + flow->cur_verbs = (void *)((uintptr_t)flow + off); + flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1); + flow->cur_verbs->specs = + (void *)(flow->cur_verbs->attr + 1); + } + /* First iteration convert the pattern into Verbs. */ + if (i == 0) { + /* Actions don't need to be converted several time. */ + ret = mlx5_flow_actions(dev, actions, flow, + (size < flow_size) ? + flow_size - size : 0, + error); + if (ret < 0) + return ret; + size += ret; + } else { + /* + * Next iteration means the pattern has already been + * converted and an expansion is necessary to match + * the user RSS request. For that only the expanded + * items will be converted, the common part with the + * user pattern are just copied into the next buffer + * zone. + */ + size += original_verbs_size; + if (size < flow_size) { + rte_memcpy(flow->cur_verbs->attr, + original_verbs->attr, + original_verbs_size + + sizeof(struct ibv_flow_attr)); + flow->cur_verbs->size = original_verbs_size; + } + } + ret = mlx5_flow_items + (dev, + (const struct rte_flow_item *) + &buf->entry[i].pattern[expanded_pattern_idx], + flow, + (size < flow_size) ? flow_size - size : 0, error); + if (ret < 0) + return ret; + size += ret; + if (size <= flow_size) { + mlx5_flow_adjust_priority(dev, flow); + LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next); + } + /* + * Keep a pointer of the first verbs conversion and the layers + * it has encountered. 
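To picture what feeds the loop above: with a user pattern of ETH / IPV4 / END and rss.types covering both IP and UDP hashing, the expansion typically produces one entry per branch of mlx5_support_expansion that the requested types can reach, for instance (the exact set depends on the expansion graph):

/*
 * entry[0]: ETH / IPV4 / END        <- the original user pattern
 * entry[1]: ETH / IPV4 / UDP / END  <- added so the UDP hash type has a rule
 *
 * Each entry is then converted into its own Verbs attribute; the first
 * iteration also converts the actions, later iterations copy that common
 * part and only convert the expanded items.
 */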
+ */ + if (i == 0) { + original_verbs = flow->cur_verbs; + original_verbs_size = size - off2; + original_layers = flow->layers; + /* + * move the index of the expanded pattern to the + * first item not addressed yet. + */ + if (pattern->type == RTE_FLOW_ITEM_TYPE_END) { + expanded_pattern_idx++; + } else { + const struct rte_flow_item *item = pattern; + + for (item = pattern; + item->type != RTE_FLOW_ITEM_TYPE_END; + ++item) + expanded_pattern_idx++; + } + } + } + /* Restore the origin layers in the flow. */ + flow->layers = original_layers; + return size; +} + +/** + * Lookup and set the ptype in the data Rx part. A single Ptype can be used, + * if several tunnel rules are used on this queue, the tunnel ptype will be + * cleared. + * + * @param rxq_ctrl + * Rx queue to update. + */ +static void +mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) { unsigned int i; + uint32_t tunnel_ptype = 0; - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!parser->queue[i].ibv_attr) + /* Look up for the ptype to use. */ + for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) { + if (!rxq_ctrl->flow_tunnels_n[i]) continue; - flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr; - parser->queue[i].ibv_attr = NULL; - flow->frxq[i].hash_fields = parser->queue[i].hash_fields; - if (!dev->data->dev_started) - continue; - flow->frxq[i].hrxq = - mlx5_hrxq_get(dev, - parser->rss_conf.key, - parser->rss_conf.key_len, - flow->frxq[i].hash_fields, - parser->rss_conf.queue, - parser->rss_conf.queue_num, - parser->tunnel, - parser->rss_conf.level); - if (flow->frxq[i].hrxq) - continue; - flow->frxq[i].hrxq = - mlx5_hrxq_new(dev, - parser->rss_conf.key, - parser->rss_conf.key_len, - flow->frxq[i].hash_fields, - parser->rss_conf.queue, - parser->rss_conf.queue_num, - parser->tunnel, - parser->rss_conf.level); - if (!flow->frxq[i].hrxq) { - return rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, - "cannot create hash rxq"); + if (!tunnel_ptype) { + tunnel_ptype = tunnels_info[i].ptype; + } else { + tunnel_ptype = 0; + break; } } - return 0; + rxq_ctrl->rxq.tunnel = tunnel_ptype; } /** - * RXQ update after flow rule creation. + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow. * - * @param dev + * @param[in] dev * Pointer to Ethernet device. - * @param flow - * Pointer to the flow rule. + * @param[in] flow + * Pointer to flow structure. */ static void -mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow) +mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { struct priv *priv = dev->data->dev_private; + const int mark = !!(flow->modifier & + (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; - unsigned int j; - if (!dev->data->dev_started) - return; - for (i = 0; i != flow->rss_conf.queue_num; ++i) { - struct mlx5_rxq_data *rxq_data = (*priv->rxqs) - [(*flow->queues)[i]]; + for (i = 0; i != flow->rss.queue_num; ++i) { + int idx = (*flow->queue)[i]; struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - uint8_t tunnel = PTYPE_IDX(flow->tunnel); + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); - rxq_data->mark |= flow->mark; - if (!tunnel) - continue; - rxq_ctrl->tunnel_types[tunnel] += 1; - /* Clear tunnel type if more than one tunnel types set. 
*/ - for (j = 0; j != RTE_DIM(rxq_ctrl->tunnel_types); ++j) { - if (j == tunnel) - continue; - if (rxq_ctrl->tunnel_types[j] > 0) { - rxq_data->tunnel = 0; - break; + if (mark) { + rxq_ctrl->rxq.mark = 1; + rxq_ctrl->flow_mark_n++; + } + if (tunnel) { + unsigned int j; + + /* Increase the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & flow->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]++; + break; + } } + mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); } - if (j == RTE_DIM(rxq_ctrl->tunnel_types)) - rxq_data->tunnel = flow->tunnel; } } /** - * Dump flow hash RX queue detail. + * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * @p flow if no other flow uses it with the same kind of request. * * @param dev * Pointer to Ethernet device. - * @param flow - * Pointer to the rte_flow. - * @param hrxq_idx - * Hash RX queue index. + * @param[in] flow + * Pointer to the flow. */ static void -mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused, - struct rte_flow *flow __rte_unused, - unsigned int hrxq_idx __rte_unused) +mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { -#ifndef NDEBUG - uintptr_t spec_ptr; - uint16_t j; - char buf[256]; - uint8_t off; - uint64_t extra_hash_fields = 0; + struct priv *priv = dev->data->dev_private; + const int mark = !!(flow->modifier & + (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)); + const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + unsigned int i; -#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - if (flow->tunnel && flow->rss_conf.level > 1) - extra_hash_fields = (uint32_t)IBV_RX_HASH_INNER; -#endif - spec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1); - for (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs; - j++) { - struct ibv_flow_spec *spec = (void *)spec_ptr; - off += sprintf(buf + off, " %x(%hu)", spec->hdr.type, - spec->hdr.size); - spec_ptr += spec->hdr.size; - } - DRV_LOG(DEBUG, - "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p," - " hash:%" PRIx64 "/%u specs:%hhu(%hu), priority:%hu, type:%d," - " flags:%x, comp_mask:%x specs:%s", - dev->data->port_id, (void *)flow, hrxq_idx, - (void *)flow->frxq[hrxq_idx].hrxq, - (void *)flow->frxq[hrxq_idx].hrxq->qp, - (void *)flow->frxq[hrxq_idx].hrxq->ind_table, - (flow->frxq[hrxq_idx].hash_fields | extra_hash_fields), - flow->rss_conf.queue_num, - flow->frxq[hrxq_idx].ibv_attr->num_of_specs, - flow->frxq[hrxq_idx].ibv_attr->size, - flow->frxq[hrxq_idx].ibv_attr->priority, - flow->frxq[hrxq_idx].ibv_attr->type, - flow->frxq[hrxq_idx].ibv_attr->flags, - flow->frxq[hrxq_idx].ibv_attr->comp_mask, - buf); -#endif + assert(dev->data->dev_started); + for (i = 0; i != flow->rss.queue_num; ++i) { + int idx = (*flow->queue)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); + + if (mark) { + rxq_ctrl->flow_mark_n--; + rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; + } + if (tunnel) { + unsigned int j; + + /* Decrease the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & flow->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]--; + break; + } + } + mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + } + } } /** - * Complete flow rule creation. + * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. * * @param dev * Pointer to Ethernet device. - * @param parser - * Internal parser structure. - * @param flow - * Pointer to the rte_flow. 
+ */ +static void +mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + unsigned int j; + + if (!(*priv->rxqs)[i]) + continue; + rxq_ctrl = container_of((*priv->rxqs)[i], + struct mlx5_rxq_ctrl, rxq); + rxq_ctrl->flow_mark_n = 0; + rxq_ctrl->rxq.mark = 0; + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) + rxq_ctrl->flow_tunnels_n[j] = 0; + rxq_ctrl->rxq.tunnel = 0; + } +} + +/** + * Validate a flow supported by the NIC. + * + * @see rte_flow_validate() + * @see rte_flow_ops + */ +int +mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error); + + if (ret < 0) + return ret; + return 0; +} + +/** + * Remove the flow. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_verbs *verbs; + + if (flow->nl_flow && priv->mnl_socket) + mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL); + LIST_FOREACH(verbs, &flow->verbs, next) { + if (verbs->flow) { + claim_zero(mlx5_glue->destroy_flow(verbs->flow)); + verbs->flow = NULL; + } + if (verbs->hrxq) { + if (flow->fate & MLX5_FLOW_FATE_DROP) + mlx5_hrxq_drop_release(dev); + else + mlx5_hrxq_release(dev, verbs->hrxq); + verbs->hrxq = NULL; + } + } + if (flow->counter) { + mlx5_flow_counter_release(flow->counter); + flow->counter = NULL; + } +} + +/** + * Apply the flow. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. * @param[out] error - * Perform verbose error reporting if not NULL. + * Pointer to error structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
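At the API level, the validation entry point above lets an application test a rule without allocating anything, since the conversion runs against a zero-sized buffer. A usage sketch; the helper name and the printf reporting are illustrative:

#include <stdio.h>
#include <rte_flow.h>

static int
example_check_rule(uint16_t port_id,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[])
{
        struct rte_flow_error err;
        int ret = rte_flow_validate(port_id, attr, pattern, actions, &err);

        if (ret)
                printf("rule rejected: %s\n",
                       err.message ? err.message : "(no details)");
        return ret;
}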
*/ static int -mlx5_flow_create_action_queue(struct rte_eth_dev *dev, - struct mlx5_flow_parse *parser, - struct rte_flow *flow, - struct rte_flow_error *error) +mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) { - struct priv *priv __rte_unused = dev->data->dev_private; - int ret; - unsigned int i; - unsigned int flows_n = 0; - - assert(priv->pd); - assert(priv->ctx); - assert(!parser->drop); - ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); - if (ret) - goto error; - if (parser->count) - flow->cs = parser->cs; - if (!dev->data->dev_started) - return 0; - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].hrxq) - continue; - flow->frxq[i].ibv_flow = - mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, - flow->frxq[i].ibv_attr); - mlx5_flow_dump(dev, flow, i); - if (!flow->frxq[i].ibv_flow) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "flow rule creation failure"); + struct priv *priv = dev->data->dev_private; + struct mlx5_flow_verbs *verbs; + int err; + + LIST_FOREACH(verbs, &flow->verbs, next) { + if (flow->fate & MLX5_FLOW_FATE_DROP) { + verbs->hrxq = mlx5_hrxq_drop_new(dev); + if (!verbs->hrxq) { + rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot get drop hash queue"); + goto error; + } + } else { + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_hrxq_get(dev, flow->key, + MLX5_RSS_HASH_KEY_LEN, + verbs->hash_fields, + (*flow->queue), + flow->rss.queue_num); + if (!hrxq) + hrxq = mlx5_hrxq_new(dev, flow->key, + MLX5_RSS_HASH_KEY_LEN, + verbs->hash_fields, + (*flow->queue), + flow->rss.queue_num, + !!(flow->layers & + MLX5_FLOW_LAYER_TUNNEL)); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot get hash queue"); + goto error; + } + verbs->hrxq = hrxq; + } + verbs->flow = + mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr); + if (!verbs->flow) { + rte_flow_error_set(error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "hardware refuses to create flow"); goto error; } - ++flows_n; } - if (!flows_n) { - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "internal error in flow creation"); + if (flow->nl_flow && + priv->mnl_socket && + mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error)) goto error; - } - mlx5_flow_create_update_rxqs(dev, flow); return 0; error: - ret = rte_errno; /* Save rte_errno before cleanup. */ - assert(flow); - for (i = 0; i != hash_rxq_init_n; ++i) { - if (flow->frxq[i].ibv_flow) { - struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow; - - claim_zero(mlx5_glue->destroy_flow(ibv_flow)); + err = rte_errno; /* Save rte_errno before cleanup. */ + LIST_FOREACH(verbs, &flow->verbs, next) { + if (verbs->hrxq) { + if (flow->fate & MLX5_FLOW_FATE_DROP) + mlx5_hrxq_drop_release(dev); + else + mlx5_hrxq_release(dev, verbs->hrxq); + verbs->hrxq = NULL; } - if (flow->frxq[i].hrxq) - mlx5_hrxq_release(dev, flow->frxq[i].hrxq); - if (flow->frxq[i].ibv_attr) - rte_free(flow->frxq[i].ibv_attr); - } - if (flow->cs) { - claim_zero(mlx5_glue->destroy_counter_set(flow->cs)); - flow->cs = NULL; - parser->cs = NULL; } - rte_errno = ret; /* Restore rte_errno. */ + rte_errno = err; /* Restore rte_errno. */ return -rte_errno; } /** - * Convert a flow. + * Create a flow and add it to @p list. * * @param dev * Pointer to Ethernet device. @@ -2441,7 +2998,7 @@ error: * Pointer to a TAILQ flow list. * @param[in] attr * Flow rule attributes. 
- * @param[in] pattern + * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END action). @@ -2459,81 +3016,43 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; - unsigned int i; + size_t size = 0; int ret; - ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); - if (ret) - goto exit; - flow = rte_calloc(__func__, 1, - sizeof(*flow) + - parser.rss_conf.queue_num * sizeof(uint16_t), - 0); + ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); + if (ret < 0) + return NULL; + size = ret; + flow = rte_calloc(__func__, 1, size, 0); if (!flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot allocate flow memory"); + "not enough memory to create flow"); return NULL; } - /* Copy configuration. */ - flow->queues = (uint16_t (*)[])(flow + 1); - flow->tunnel = parser.tunnel; - flow->rss_conf = (struct rte_flow_action_rss){ - .func = RTE_ETH_HASH_FUNCTION_DEFAULT, - .level = parser.rss_conf.level, - .types = parser.rss_conf.types, - .key_len = parser.rss_conf.key_len, - .queue_num = parser.rss_conf.queue_num, - .key = memcpy(flow->rss_key, parser.rss_conf.key, - sizeof(*parser.rss_conf.key) * - parser.rss_conf.key_len), - .queue = memcpy(flow->queues, parser.rss_conf.queue, - sizeof(*parser.rss_conf.queue) * - parser.rss_conf.queue_num), - }; - flow->mark = parser.mark; - /* finalise the flow. */ - if (parser.drop) - ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow, - error); - else - ret = mlx5_flow_create_action_queue(dev, &parser, flow, error); - if (ret) - goto exit; + ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error); + if (ret < 0) { + rte_free(flow); + return NULL; + } + assert((size_t)ret == size); + if (dev->data->dev_started) { + ret = mlx5_flow_apply(dev, flow, error); + if (ret < 0) { + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (flow) { + mlx5_flow_remove(dev, flow); + rte_free(flow); + } + rte_errno = ret; /* Restore rte_errno. */ + return NULL; + } + } TAILQ_INSERT_TAIL(list, flow, next); - DRV_LOG(DEBUG, "port %u flow created %p", dev->data->port_id, - (void *)flow); + mlx5_flow_rxq_flags_set(dev, flow); return flow; -exit: - DRV_LOG(ERR, "port %u flow creation error: %s", dev->data->port_id, - error->message); - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser.queue[i].ibv_attr) - rte_free(parser.queue[i].ibv_attr); - } - rte_free(flow); - return NULL; -} - -/** - * Validate a flow supported by the NIC. 
- * - * @see rte_flow_validate() - * @see rte_flow_ops - */ -int -mlx5_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - struct mlx5_flow_parse parser = { .create = 0, }; - - return mlx5_flow_convert(dev, attr, items, actions, error, &parser); } /** @@ -2549,10 +3068,9 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - - return mlx5_flow_list_create(dev, &priv->flows, attr, items, actions, - error); + return mlx5_flow_list_create + (dev, &((struct priv *)dev->data->dev_private)->flows, + attr, items, actions, error); } /** @@ -2569,95 +3087,14 @@ static void mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, struct rte_flow *flow) { - struct priv *priv = dev->data->dev_private; - unsigned int i; - - if (flow->drop || !dev->data->dev_started) - goto free; - for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) { - /* Update queue tunnel type. */ - struct mlx5_rxq_data *rxq_data = (*priv->rxqs) - [(*flow->queues)[i]]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - uint8_t tunnel = PTYPE_IDX(flow->tunnel); - - assert(rxq_ctrl->tunnel_types[tunnel] > 0); - rxq_ctrl->tunnel_types[tunnel] -= 1; - if (!rxq_ctrl->tunnel_types[tunnel]) { - /* Update tunnel type. */ - uint8_t j; - uint8_t types = 0; - uint8_t last; - - for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++) - if (rxq_ctrl->tunnel_types[j]) { - types += 1; - last = j; - } - /* Keep same if more than one tunnel types left. */ - if (types == 1) - rxq_data->tunnel = ptype_ext[last]; - else if (types == 0) - /* No tunnel type left. */ - rxq_data->tunnel = 0; - } - } - for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) { - struct rte_flow *tmp; - int mark = 0; - - /* - * To remove the mark from the queue, the queue must not be - * present in any other marked flow (RSS or not). - */ - TAILQ_FOREACH(tmp, list, next) { - unsigned int j; - uint16_t *tqs = NULL; - uint16_t tq_n = 0; - - if (!tmp->mark) - continue; - for (j = 0; j != hash_rxq_init_n; ++j) { - if (!tmp->frxq[j].hrxq) - continue; - tqs = tmp->frxq[j].hrxq->ind_table->queues; - tq_n = tmp->frxq[j].hrxq->ind_table->queues_n; - } - if (!tq_n) - continue; - for (j = 0; (j != tq_n) && !mark; j++) - if (tqs[j] == (*flow->queues)[i]) - mark = 1; - } - (*priv->rxqs)[(*flow->queues)[i]]->mark = mark; - } -free: - if (flow->drop) { - if (flow->frxq[HASH_RXQ_ETH].ibv_flow) - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr); - } else { - for (i = 0; i != hash_rxq_init_n; ++i) { - struct mlx5_flow *frxq = &flow->frxq[i]; - - if (frxq->ibv_flow) - claim_zero(mlx5_glue->destroy_flow - (frxq->ibv_flow)); - if (frxq->hrxq) - mlx5_hrxq_release(dev, frxq->hrxq); - if (frxq->ibv_attr) - rte_free(frxq->ibv_attr); - } - } - if (flow->cs) { - claim_zero(mlx5_glue->destroy_counter_set(flow->cs)); - flow->cs = NULL; - } + mlx5_flow_remove(dev, flow); TAILQ_REMOVE(list, flow, next); - DRV_LOG(DEBUG, "port %u flow destroyed %p", dev->data->port_id, - (void *)flow); + /* + * Update RX queue flags only if port is started, otherwise it is + * already clean. 
+ */ + if (dev->data->dev_started) + mlx5_flow_rxq_flags_trim(dev, flow); rte_free(flow); } @@ -2681,135 +3118,6 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) } /** - * Create drop queue. - * - * @param dev - * Pointer to Ethernet device. - * - * @return - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ -int -mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) -{ - struct priv *priv = dev->data->dev_private; - struct mlx5_hrxq_drop *fdq = NULL; - - assert(priv->pd); - assert(priv->ctx); - fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0); - if (!fdq) { - DRV_LOG(WARNING, - "port %u cannot allocate memory for drop queue", - dev->data->port_id); - rte_errno = ENOMEM; - return -rte_errno; - } - fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); - if (!fdq->cq) { - DRV_LOG(WARNING, "port %u cannot allocate CQ for drop queue", - dev->data->port_id); - rte_errno = errno; - goto error; - } - fdq->wq = mlx5_glue->create_wq - (priv->ctx, - &(struct ibv_wq_init_attr){ - .wq_type = IBV_WQT_RQ, - .max_wr = 1, - .max_sge = 1, - .pd = priv->pd, - .cq = fdq->cq, - }); - if (!fdq->wq) { - DRV_LOG(WARNING, "port %u cannot allocate WQ for drop queue", - dev->data->port_id); - rte_errno = errno; - goto error; - } - fdq->ind_table = mlx5_glue->create_rwq_ind_table - (priv->ctx, - &(struct ibv_rwq_ind_table_init_attr){ - .log_ind_tbl_size = 0, - .ind_tbl = &fdq->wq, - .comp_mask = 0, - }); - if (!fdq->ind_table) { - DRV_LOG(WARNING, - "port %u cannot allocate indirection table for drop" - " queue", - dev->data->port_id); - rte_errno = errno; - goto error; - } - fdq->qp = mlx5_glue->create_qp_ex - (priv->ctx, - &(struct ibv_qp_init_attr_ex){ - .qp_type = IBV_QPT_RAW_PACKET, - .comp_mask = - IBV_QP_INIT_ATTR_PD | - IBV_QP_INIT_ATTR_IND_TABLE | - IBV_QP_INIT_ATTR_RX_HASH, - .rx_hash_conf = (struct ibv_rx_hash_conf){ - .rx_hash_function = - IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_hash_default_key_len, - .rx_hash_key = rss_hash_default_key, - .rx_hash_fields_mask = 0, - }, - .rwq_ind_tbl = fdq->ind_table, - .pd = priv->pd - }); - if (!fdq->qp) { - DRV_LOG(WARNING, "port %u cannot allocate QP for drop queue", - dev->data->port_id); - rte_errno = errno; - goto error; - } - priv->flow_drop_queue = fdq; - return 0; -error: - if (fdq->qp) - claim_zero(mlx5_glue->destroy_qp(fdq->qp)); - if (fdq->ind_table) - claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table)); - if (fdq->wq) - claim_zero(mlx5_glue->destroy_wq(fdq->wq)); - if (fdq->cq) - claim_zero(mlx5_glue->destroy_cq(fdq->cq)); - if (fdq) - rte_free(fdq); - priv->flow_drop_queue = NULL; - return -rte_errno; -} - -/** - * Delete drop queue. - * - * @param dev - * Pointer to Ethernet device. - */ -void -mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev) -{ - struct priv *priv = dev->data->dev_private; - struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue; - - if (!fdq) - return; - if (fdq->qp) - claim_zero(mlx5_glue->destroy_qp(fdq->qp)); - if (fdq->ind_table) - claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table)); - if (fdq->wq) - claim_zero(mlx5_glue->destroy_wq(fdq->wq)); - if (fdq->cq) - claim_zero(mlx5_glue->destroy_cq(fdq->cq)); - rte_free(fdq); - priv->flow_drop_queue = NULL; -} - -/** * Remove all flows. 
* * @param dev @@ -2820,68 +3128,11 @@ mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev) void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) { - struct priv *priv = dev->data->dev_private; struct rte_flow *flow; - unsigned int i; - TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { - struct mlx5_ind_table_ibv *ind_tbl = NULL; - - if (flow->drop) { - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) - continue; - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[HASH_RXQ_ETH].ibv_flow)); - flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; - DRV_LOG(DEBUG, "port %u flow %p removed", - dev->data->port_id, (void *)flow); - /* Next flow. */ - continue; - } - /* Verify the flow has not already been cleaned. */ - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_flow) - continue; - /* - * Indirection table may be necessary to remove the - * flags in the Rx queues. - * This helps to speed-up the process by avoiding - * another loop. - */ - ind_tbl = flow->frxq[i].hrxq->ind_table; - break; - } - if (i == hash_rxq_init_n) - return; - if (flow->mark) { - assert(ind_tbl); - for (i = 0; i != ind_tbl->queues_n; ++i) - (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0; - } - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_flow) - continue; - claim_zero(mlx5_glue->destroy_flow - (flow->frxq[i].ibv_flow)); - flow->frxq[i].ibv_flow = NULL; - mlx5_hrxq_release(dev, flow->frxq[i].hrxq); - flow->frxq[i].hrxq = NULL; - } - DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id, - (void *)flow); - } - /* Cleanup Rx queue tunnel info. */ - for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_data *q = (*priv->rxqs)[i]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(q, struct mlx5_rxq_ctrl, rxq); - - if (!q) - continue; - memset((void *)rxq_ctrl->tunnel_types, 0, - sizeof(rxq_ctrl->tunnel_types)); - q->tunnel = 0; - } + TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) + mlx5_flow_remove(dev, flow); + mlx5_flow_rxq_flags_clear(dev); } /** @@ -2898,75 +3149,22 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) { - struct priv *priv = dev->data->dev_private; struct rte_flow *flow; + struct rte_flow_error error; + int ret = 0; TAILQ_FOREACH(flow, list, next) { - unsigned int i; - - if (flow->drop) { - flow->frxq[HASH_RXQ_ETH].ibv_flow = - mlx5_glue->create_flow - (priv->flow_drop_queue->qp, - flow->frxq[HASH_RXQ_ETH].ibv_attr); - if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { - DRV_LOG(DEBUG, - "port %u flow %p cannot be applied", - dev->data->port_id, (void *)flow); - rte_errno = EINVAL; - return -rte_errno; - } - DRV_LOG(DEBUG, "port %u flow %p applied", - dev->data->port_id, (void *)flow); - /* Next flow. 
*/ - continue; - } - for (i = 0; i != hash_rxq_init_n; ++i) { - if (!flow->frxq[i].ibv_attr) - continue; - flow->frxq[i].hrxq = - mlx5_hrxq_get(dev, flow->rss_conf.key, - flow->rss_conf.key_len, - flow->frxq[i].hash_fields, - flow->rss_conf.queue, - flow->rss_conf.queue_num, - flow->tunnel, - flow->rss_conf.level); - if (flow->frxq[i].hrxq) - goto flow_create; - flow->frxq[i].hrxq = - mlx5_hrxq_new(dev, flow->rss_conf.key, - flow->rss_conf.key_len, - flow->frxq[i].hash_fields, - flow->rss_conf.queue, - flow->rss_conf.queue_num, - flow->tunnel, - flow->rss_conf.level); - if (!flow->frxq[i].hrxq) { - DRV_LOG(DEBUG, - "port %u flow %p cannot create hash" - " rxq", - dev->data->port_id, (void *)flow); - rte_errno = EINVAL; - return -rte_errno; - } -flow_create: - mlx5_flow_dump(dev, flow, i); - flow->frxq[i].ibv_flow = - mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, - flow->frxq[i].ibv_attr); - if (!flow->frxq[i].ibv_flow) { - DRV_LOG(DEBUG, - "port %u flow %p type %u cannot be" - " applied", - dev->data->port_id, (void *)flow, i); - rte_errno = EINVAL; - return -rte_errno; - } - } - mlx5_flow_create_update_rxqs(dev, flow); + ret = mlx5_flow_apply(dev, flow, &error); + if (ret < 0) + goto error; + mlx5_flow_rxq_flags_set(dev, flow); } return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_stop(dev, list); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -3019,7 +3217,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; const struct rte_flow_attr attr = { .ingress = 1, - .priority = MLX5_CTRL_FLOW_PRIORITY, + .priority = MLX5_FLOW_PRIO_RSVD, }; struct rte_flow_item items[] = { { @@ -3129,49 +3327,88 @@ mlx5_flow_flush(struct rte_eth_dev *dev, return 0; } -#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT +/** + * Isolated mode. + * + * @see rte_flow_isolate() + * @see rte_flow_ops + */ +int +mlx5_flow_isolate(struct rte_eth_dev *dev, + int enable, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + + if (dev->data->dev_started) { + rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "port must be stopped first"); + return -rte_errno; + } + priv->isolated = !!enable; + if (enable) + dev->dev_ops = &mlx5_dev_ops_isolate; + else + dev->dev_ops = &mlx5_dev_ops; + return 0; +} + /** * Query flow counter. * - * @param cs - * the counter set. - * @param counter_value - * returned data from the counter. + * @param flow + * Pointer to the flow. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
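Because of the "port must be stopped first" check above, isolated mode has to be requested before rte_eth_dev_start(). A sketch of the expected ordering; the helper name is illustrative:

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int
example_start_isolated(uint16_t port_id)
{
        struct rte_flow_error err;

        /* Must be done while the port is stopped. */
        if (rte_flow_isolate(port_id, 1, &err))
                return -rte_errno;
        return rte_eth_dev_start(port_id);
}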
*/ static int -mlx5_flow_query_count(struct ibv_counter_set *cs, - struct mlx5_flow_counter_stats *counter_stats, - struct rte_flow_query_count *query_count, +mlx5_flow_query_count(struct rte_flow *flow __rte_unused, + void *data __rte_unused, struct rte_flow_error *error) { - uint64_t counters[2]; - struct ibv_query_counter_set_attr query_cs_attr = { - .cs = cs, - .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, - }; - struct ibv_counter_set_data query_out = { - .out = counters, - .outlen = 2 * sizeof(uint64_t), - }; - int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); - - if (err) - return rte_flow_error_set(error, err, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot read counter"); - query_count->hits_set = 1; - query_count->bytes_set = 1; - query_count->hits = counters[0] - counter_stats->hits; - query_count->bytes = counters[1] - counter_stats->bytes; - if (query_count->reset) { - counter_stats->hits = counters[0]; - counter_stats->bytes = counters[1]; +#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + if (flow->modifier & MLX5_FLOW_MOD_COUNT) { + struct rte_flow_query_count *qc = data; + uint64_t counters[2] = {0, 0}; + struct ibv_query_counter_set_attr query_cs_attr = { + .cs = flow->counter->cs, + .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, + }; + struct ibv_counter_set_data query_out = { + .out = counters, + .outlen = 2 * sizeof(uint64_t), + }; + int err = mlx5_glue->query_counter_set(&query_cs_attr, + &query_out); + + if (err) + return rte_flow_error_set + (error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot read counter"); + qc->hits_set = 1; + qc->bytes_set = 1; + qc->hits = counters[0] - flow->counter->hits; + qc->bytes = counters[1] - flow->counter->bytes; + if (qc->reset) { + flow->counter->hits = counters[0]; + flow->counter->bytes = counters[1]; + } + return 0; } - return 0; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "flow does not have counter"); +#endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not available"); } /** @@ -3183,54 +3420,28 @@ mlx5_flow_query_count(struct ibv_counter_set *cs, int mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow, - const struct rte_flow_action *action __rte_unused, + const struct rte_flow_action *actions, void *data, struct rte_flow_error *error) { - if (flow->cs) { - int ret; + int ret = 0; - ret = mlx5_flow_query_count(flow->cs, - &flow->counter_stats, - (struct rte_flow_query_count *)data, - error); - if (ret) + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = mlx5_flow_query_count(flow, data, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + if (ret < 0) return ret; - } else { - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "no counter found for flow"); - } - return 0; -} -#endif - -/** - * Isolated mode. 
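An application reads the counter handled above through rte_flow_query() with a COUNT action; passing an END-terminated action array matches the loop the new mlx5_flow_query() runs over. The helper name is illustrative:

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

static int
example_read_counter(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count counts = { .reset = 1 };
        const struct rte_flow_action query_actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;
        int ret = rte_flow_query(port_id, flow, query_actions, &counts, &err);

        if (ret)
                return ret;
        printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
               counts.hits_set ? counts.hits : 0,
               counts.bytes_set ? counts.bytes : 0);
        return 0;
}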
- * - * @see rte_flow_isolate() - * @see rte_flow_ops - */ -int -mlx5_flow_isolate(struct rte_eth_dev *dev, - int enable, - struct rte_flow_error *error) -{ - struct priv *priv = dev->data->dev_private; - - if (dev->data->dev_started) { - rte_flow_error_set(error, EBUSY, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "port must be stopped first"); - return -rte_errno; } - priv->isolated = !!enable; - if (enable) - dev->dev_ops = &mlx5_dev_ops_isolate; - else - dev->dev_ops = &mlx5_dev_ops; return 0; } @@ -3445,9 +3656,6 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, .type = 0, }, }; - struct mlx5_flow_parse parser = { - .layer = HASH_RXQ_ETH, - }; struct rte_flow_error error; struct rte_flow *flow; int ret; @@ -3455,10 +3663,6 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) return ret; - ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, - attributes.actions, &error, &parser); - if (ret) - return ret; flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, attributes.items, attributes.actions, &error); @@ -3482,94 +3686,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_fdir_filter_delete(struct rte_eth_dev *dev, - const struct rte_eth_fdir_filter *fdir_filter) +mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused, + const struct rte_eth_fdir_filter *fdir_filter + __rte_unused) { - struct priv *priv = dev->data->dev_private; - struct mlx5_fdir attributes = { - .attr.group = 0, - }; - struct mlx5_flow_parse parser = { - .create = 1, - .layer = HASH_RXQ_ETH, - }; - struct rte_flow_error error; - struct rte_flow *flow; - unsigned int i; - int ret; - - ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); - if (ret) - return ret; - ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, - attributes.actions, &error, &parser); - if (ret) - goto exit; - /* - * Special case for drop action which is only set in the - * specifications when the flow is created. In this situation the - * drop specification is missing. - */ - if (parser.drop) { - struct ibv_flow_spec_action_drop *drop; - - drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr + - parser.queue[HASH_RXQ_ETH].offset); - *drop = (struct ibv_flow_spec_action_drop){ - .type = IBV_FLOW_SPEC_ACTION_DROP, - .size = sizeof(struct ibv_flow_spec_action_drop), - }; - parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++; - } - TAILQ_FOREACH(flow, &priv->flows, next) { - struct ibv_flow_attr *attr; - struct ibv_spec_header *attr_h; - void *spec; - struct ibv_flow_attr *flow_attr; - struct ibv_spec_header *flow_h; - void *flow_spec; - unsigned int specs_n; - unsigned int queue_id = parser.drop ? HASH_RXQ_ETH : - parser.layer; - - attr = parser.queue[queue_id].ibv_attr; - flow_attr = flow->frxq[queue_id].ibv_attr; - /* Compare first the attributes. 
*/ - if (!flow_attr || - memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr))) - continue; - if (attr->num_of_specs == 0) - continue; - spec = (void *)((uintptr_t)attr + - sizeof(struct ibv_flow_attr)); - flow_spec = (void *)((uintptr_t)flow_attr + - sizeof(struct ibv_flow_attr)); - specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs); - for (i = 0; i != specs_n; ++i) { - attr_h = spec; - flow_h = flow_spec; - if (memcmp(spec, flow_spec, - RTE_MIN(attr_h->size, flow_h->size))) - goto wrong_flow; - spec = (void *)((uintptr_t)spec + attr_h->size); - flow_spec = (void *)((uintptr_t)flow_spec + - flow_h->size); - } - /* At this point, the flow match. */ - break; -wrong_flow: - /* The flow does not match. */ - continue; - } - ret = rte_errno; /* Save rte_errno before cleanup. */ - if (flow) - mlx5_flow_list_destroy(dev, &priv->flows, flow); -exit: - for (i = 0; i != hash_rxq_init_n; ++i) { - if (parser.queue[i].ibv_attr) - rte_free(parser.queue[i].ibv_attr); - } - rte_errno = ret; /* Restore rte_errno. */ + rte_errno = ENOTSUP; return -rte_errno; } @@ -3725,56 +3846,3 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, } return 0; } - -/** - * Detect number of Verbs flow priorities supported. - * - * @param dev - * Pointer to Ethernet device. - * - * @return - * number of supported Verbs flow priority. - */ -unsigned int -mlx5_get_max_verbs_prio(struct rte_eth_dev *dev) -{ - struct priv *priv = dev->data->dev_private; - unsigned int verb_priorities = MLX5_VERBS_FLOW_PRIO_8; - struct { - struct ibv_flow_attr attr; - struct ibv_flow_spec_eth eth; - struct ibv_flow_spec_action_drop drop; - } flow_attr = { - .attr = { - .num_of_specs = 2, - }, - .eth = { - .type = IBV_FLOW_SPEC_ETH, - .size = sizeof(struct ibv_flow_spec_eth), - }, - .drop = { - .size = sizeof(struct ibv_flow_spec_action_drop), - .type = IBV_FLOW_SPEC_ACTION_DROP, - }, - }; - struct ibv_flow *flow; - - do { - flow_attr.attr.priority = verb_priorities - 1; - flow = mlx5_glue->create_flow(priv->flow_drop_queue->qp, - &flow_attr.attr); - if (flow) { - claim_zero(mlx5_glue->destroy_flow(flow)); - /* Try more priorities. */ - verb_priorities *= 2; - } else { - /* Failed, restore last right number. 
*/ - verb_priorities /= 2; - break; - } - } while (1); - DRV_LOG(DEBUG, "port %u Verbs flow priorities: %d," - " user flow priorities: %d", - dev->data->port_id, verb_priorities, MLX5_CTRL_FLOW_PRIORITY); - return verb_priorities; -} diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c index c7965e51..84f9492a 100644 --- a/drivers/net/mlx5/mlx5_glue.c +++ b/drivers/net/mlx5/mlx5_glue.c @@ -4,6 +4,7 @@ */ #include <errno.h> +#include <stdalign.h> #include <stddef.h> #include <stdint.h> @@ -23,6 +24,8 @@ #pragma GCC diagnostic error "-Wpedantic" #endif +#include <rte_config.h> + #include "mlx5_autoconf.h" #include "mlx5_glue.h" @@ -343,6 +346,7 @@ mlx5_glue_dv_create_qp(struct ibv_context *context, #endif } +alignas(RTE_CACHE_LINE_SIZE) const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){ .version = MLX5_GLUE_VERSION, .fork_init = mlx5_glue_fork_init, diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 672a4761..12ee37f5 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -49,7 +49,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]) struct ifreq request; int ret; - ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request); + ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request, 0); if (ret) return ret; memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 08105a44..1d1bcb5f 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -198,9 +198,8 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket) 0, socket); if (bt->table == NULL) { rte_errno = ENOMEM; - DRV_LOG(ERR, - "failed to allocate memory for btree cache on socket %d", - socket); + DEBUG("failed to allocate memory for btree cache on socket %d", + socket); return -rte_errno; } bt->size = n; @@ -208,8 +207,8 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket) (*bt->table)[bt->len++] = (struct mlx5_mr_cache) { .lkey = UINT32_MAX, }; - DRV_LOG(DEBUG, "initialized B-tree %p with table %p", - (void *)bt, (void *)bt->table); + DEBUG("initialized B-tree %p with table %p", + (void *)bt, (void *)bt->table); return 0; } @@ -224,8 +223,8 @@ mlx5_mr_btree_free(struct mlx5_mr_btree *bt) { if (bt == NULL) return; - DRV_LOG(DEBUG, "freeing B-tree %p with table %p", - (void *)bt, (void *)bt->table); + DEBUG("freeing B-tree %p with table %p", + (void *)bt, (void *)bt->table); rte_free(bt->table); memset(bt, 0, sizeof(*bt)); } @@ -236,9 +235,10 @@ mlx5_mr_btree_free(struct mlx5_mr_btree *bt) * @param bt * Pointer to B-tree structure. */ -static void -mlx5_mr_btree_dump(struct mlx5_mr_btree *bt) +void +mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused) { +#ifndef NDEBUG int idx; struct mlx5_mr_cache *lkp_tbl; @@ -248,11 +248,11 @@ mlx5_mr_btree_dump(struct mlx5_mr_btree *bt) for (idx = 0; idx < bt->len; ++idx) { struct mlx5_mr_cache *entry = &lkp_tbl[idx]; - DRV_LOG(DEBUG, - "B-tree(%p)[%u]," - " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", - (void *)bt, idx, entry->start, entry->end, entry->lkey); + DEBUG("B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); } +#endif } /** @@ -576,11 +576,10 @@ alloc_resources: assert(msl->page_sz == ms->hugepage_sz); /* Number of memsegs in the range. 
*/ ms_n = len / msl->page_sz; - DRV_LOG(DEBUG, - "port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR ")," - " page_sz=0x%" PRIx64 ", ms_n=%u", - dev->data->port_id, (void *)addr, - data.start, data.end, msl->page_sz, ms_n); + DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " page_sz=0x%" PRIx64 ", ms_n=%u", + dev->data->port_id, (void *)addr, + data.start, data.end, msl->page_sz, ms_n); /* Size of memory for bitmap. */ bmp_size = rte_bitmap_get_memory_footprint(ms_n); mr = rte_zmalloc_socket(NULL, @@ -589,10 +588,9 @@ alloc_resources: bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id); if (mr == NULL) { - DRV_LOG(WARNING, - "port %u unable to allocate memory for a new MR of" - " address (%p).", - dev->data->port_id, (void *)addr); + DEBUG("port %u unable to allocate memory for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); rte_errno = ENOMEM; goto err_nolock; } @@ -606,10 +604,9 @@ alloc_resources: bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE); mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size); if (mr->ms_bmp == NULL) { - DRV_LOG(WARNING, - "port %u unable to initialize bitamp for a new MR of" - " address (%p).", - dev->data->port_id, (void *)addr); + DEBUG("port %u unable to initialize bitamp for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); rte_errno = EINVAL; goto err_nolock; } @@ -625,11 +622,10 @@ alloc_resources: data_re = data; if (len > msl->page_sz && !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) { - DRV_LOG(WARNING, - "port %u unable to find virtually contiguous" - " chunk for address (%p)." - " rte_memseg_contig_walk() failed.", - dev->data->port_id, (void *)addr); + DEBUG("port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); rte_errno = ENXIO; goto err_memlock; } @@ -657,9 +653,8 @@ alloc_resources: * here again. */ mr_btree_insert(&priv->mr.cache, entry); - DRV_LOG(DEBUG, - "port %u found MR for %p on final lookup, abort", - dev->data->port_id, (void *)addr); + DEBUG("port %u found MR for %p on final lookup, abort", + dev->data->port_id, (void *)addr); rte_rwlock_write_unlock(&priv->mr.rwlock); rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); /* @@ -707,22 +702,20 @@ alloc_resources: mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len, IBV_ACCESS_LOCAL_WRITE); if (mr->ibv_mr == NULL) { - DRV_LOG(WARNING, - "port %u fail to create a verbs MR for address (%p)", - dev->data->port_id, (void *)addr); + DEBUG("port %u fail to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); rte_errno = EINVAL; goto err_mrlock; } assert((uintptr_t)mr->ibv_mr->addr == data.start); assert(mr->ibv_mr->length == len); LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); - DRV_LOG(DEBUG, - "port %u MR CREATED (%p) for %p:\n" - " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," - " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", - dev->data->port_id, (void *)mr, (void *)addr, - data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey), - mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + DEBUG("port %u MR CREATED (%p) for %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); /* Insert to the global cache table. */ mr_insert_dev_cache(dev, mr); /* Fill in output data. 
*/ @@ -797,8 +790,8 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) int i; int rebuild = 0; - DRV_LOG(DEBUG, "port %u free callback: addr=%p, len=%zu", - dev->data->port_id, addr, len); + DEBUG("port %u free callback: addr=%p, len=%zu", + dev->data->port_id, addr, len); msl = rte_mem_virt2memseg_list(addr); /* addr and len must be page-aligned. */ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz)); @@ -825,14 +818,14 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) pos = ms_idx - mr->ms_base_idx; assert(rte_bitmap_get(mr->ms_bmp, pos)); assert(pos < mr->ms_bmp_n); - DRV_LOG(DEBUG, "port %u MR(%p): clear bitmap[%u] for addr %p", - dev->data->port_id, (void *)mr, pos, (void *)start); + DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p", + dev->data->port_id, (void *)mr, pos, (void *)start); rte_bitmap_clear(mr->ms_bmp, pos); if (--mr->ms_n == 0) { LIST_REMOVE(mr, mr); LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); - DRV_LOG(DEBUG, "port %u remove MR(%p) from list", - dev->data->port_id, (void *)mr); + DEBUG("port %u remove MR(%p) from list", + dev->data->port_id, (void *)mr); } /* * MR is fragmented or will be freed. the global cache must be @@ -852,13 +845,11 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) * before the core sees the newly allocated memory. */ ++priv->mr.dev_gen; - DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d", - priv->mr.dev_gen); + DEBUG("broadcasting local cache flush, gen=%d", + priv->mr.dev_gen); rte_smp_wmb(); } rte_rwlock_write_unlock(&priv->mr.rwlock); - if (rebuild && rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG) - mlx5_mr_dump_dev(dev); } /** @@ -1123,8 +1114,9 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, * Pointer to Ethernet device. 
*/ void -mlx5_mr_dump_dev(struct rte_eth_dev *dev) +mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused) { +#ifndef NDEBUG struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; int mr_n = 0; @@ -1135,11 +1127,10 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev) LIST_FOREACH(mr, &priv->mr.mr_list, mr) { unsigned int n; - DRV_LOG(DEBUG, - "port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u", - dev->data->port_id, mr_n++, - rte_cpu_to_be_32(mr->ibv_mr->lkey), - mr->ms_n, mr->ms_bmp_n); + DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u", + dev->data->port_id, mr_n++, + rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_n, mr->ms_bmp_n); if (mr->ms_n == 0) continue; for (n = 0; n < mr->ms_bmp_n; ) { @@ -1148,14 +1139,14 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev) n = mr_find_next_chunk(mr, &ret, n); if (!ret.end) break; - DRV_LOG(DEBUG, - " chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")", - chunk_n++, ret.start, ret.end); + DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")", + chunk_n++, ret.start, ret.end); } } - DRV_LOG(DEBUG, "port %u dumping global cache", dev->data->port_id); + DEBUG("port %u dumping global cache", dev->data->port_id); mlx5_mr_btree_dump(&priv->mr.cache); rte_rwlock_read_unlock(&priv->mr.rwlock); +#endif } /** diff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h index e0b28215..a57003fe 100644 --- a/drivers/net/mlx5/mlx5_mr.h +++ b/drivers/net/mlx5/mlx5_mr.h @@ -74,9 +74,12 @@ void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, size_t len, void *arg); int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, struct rte_mempool *mp); -void mlx5_mr_dump_dev(struct rte_eth_dev *dev); void mlx5_mr_release(struct rte_eth_dev *dev); +/* Debug purpose functions. */ +void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt); +void mlx5_mr_dump_dev(struct rte_eth_dev *dev); + /** * Look up LKey from given lookup table by linear search. Firstly look up the * last-hit entry. If miss, the entire array is searched. If found, update the diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c index dca85835..d61826ae 100644 --- a/drivers/net/mlx5/mlx5_nl.c +++ b/drivers/net/mlx5/mlx5_nl.c @@ -3,10 +3,21 @@ * Copyright 2018 Mellanox Technologies, Ltd */ +#include <errno.h> +#include <linux/if_link.h> #include <linux/netlink.h> #include <linux/rtnetlink.h> +#include <net/if.h> +#include <rdma/rdma_netlink.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <sys/socket.h> #include <unistd.h> +#include <rte_errno.h> + #include "mlx5.h" #include "mlx5_utils.h" @@ -27,6 +38,40 @@ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg)))) #endif +/* + * The following definitions are normally found in rdma/rdma_netlink.h, + * however they are so recent that most systems do not expose them yet. + */ +#ifndef HAVE_RDMA_NL_NLDEV +#define RDMA_NL_NLDEV 5 +#endif +#ifndef HAVE_RDMA_NLDEV_CMD_GET +#define RDMA_NLDEV_CMD_GET 1 +#endif +#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET +#define RDMA_NLDEV_CMD_PORT_GET 5 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX +#define RDMA_NLDEV_ATTR_DEV_INDEX 1 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME +#define RDMA_NLDEV_ATTR_DEV_NAME 2 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX +#define RDMA_NLDEV_ATTR_PORT_INDEX 3 +#endif +#ifndef HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX +#define RDMA_NLDEV_ATTR_NDEV_INDEX 50 +#endif + +/* These are normally found in linux/if_link.h. 
*/ +#ifndef HAVE_IFLA_PHYS_SWITCH_ID +#define IFLA_PHYS_SWITCH_ID 36 +#endif +#ifndef HAVE_IFLA_PHYS_PORT_NAME +#define IFLA_PHYS_PORT_NAME 38 +#endif + /* Add/remove MAC address through Netlink */ struct mlx5_nl_mac_addr { struct ether_addr (*mac)[]; @@ -34,29 +79,35 @@ struct mlx5_nl_mac_addr { int mac_n; /**< Number of addresses in the array. */ }; +/** Data structure used by mlx5_nl_ifindex_cb(). */ +struct mlx5_nl_ifindex_data { + const char *name; /**< IB device name (in). */ + uint32_t ibindex; /**< IB device index (out). */ + uint32_t ifindex; /**< Network interface index (out). */ +}; + /** * Opens a Netlink socket. * - * @param nl_groups - * Netlink group value (e.g. RTMGRP_LINK). + * @param protocol + * Netlink protocol (e.g. NETLINK_ROUTE, NETLINK_RDMA). * * @return * A file descriptor on success, a negative errno value otherwise and * rte_errno is set. */ int -mlx5_nl_init(uint32_t nl_groups) +mlx5_nl_init(int protocol) { int fd; int sndbuf_size = MLX5_SEND_BUF_SIZE; int rcvbuf_size = MLX5_RECV_BUF_SIZE; struct sockaddr_nl local = { .nl_family = AF_NETLINK, - .nl_groups = nl_groups, }; int ret; - fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE); + fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol); if (fd == -1) { rte_errno = errno; return -rte_errno; @@ -311,7 +362,7 @@ mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[], int *mac_n) { struct priv *priv = dev->data->dev_private; - int iface_idx = mlx5_ifindex(dev); + unsigned int iface_idx = mlx5_ifindex(dev); struct { struct nlmsghdr hdr; struct ifinfomsg ifm; @@ -334,9 +385,9 @@ mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[], int ret; uint32_t sn = priv->nl_sn++; - if (priv->nl_socket == -1) + if (priv->nl_socket_route == -1) return 0; - fd = priv->nl_socket; + fd = priv->nl_socket_route; ret = mlx5_nl_request(fd, &req.hdr, sn, &req.ifm, sizeof(struct ifinfomsg)); if (ret < 0) @@ -370,7 +421,7 @@ mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac, int add) { struct priv *priv = dev->data->dev_private; - int iface_idx = mlx5_ifindex(dev); + unsigned int iface_idx = mlx5_ifindex(dev); struct { struct nlmsghdr hdr; struct ndmsg ndm; @@ -398,9 +449,9 @@ mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac, int ret; uint32_t sn = priv->nl_sn++; - if (priv->nl_socket == -1) + if (priv->nl_socket_route == -1) return 0; - fd = priv->nl_socket; + fd = priv->nl_socket_route; memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN); req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) + RTA_ALIGN(req.rta.rta_len); @@ -549,7 +600,7 @@ static int mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable) { struct priv *priv = dev->data->dev_private; - int iface_idx = mlx5_ifindex(dev); + unsigned int iface_idx = mlx5_ifindex(dev); struct { struct nlmsghdr hdr; struct ifinfomsg ifi; @@ -569,9 +620,9 @@ mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable) int ret; assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI))); - if (priv->nl_socket < 0) + if (priv->nl_socket_route < 0) return 0; - fd = priv->nl_socket; + fd = priv->nl_socket_route; ret = mlx5_nl_send(fd, &req.hdr, priv->nl_sn++); if (ret < 0) return ret; @@ -625,3 +676,241 @@ mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable) strerror(rte_errno)); return ret; } + +/** + * Process network interface information from Netlink message. + * + * @param nh + * Pointer to Netlink message header. + * @param arg + * Opaque data pointer for this callback. 
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_ifindex_cb(struct nlmsghdr *nh, void *arg) +{ + struct mlx5_nl_ifindex_data *data = arg; + size_t off = NLMSG_HDRLEN; + uint32_t ibindex = 0; + uint32_t ifindex = 0; + int found = 0; + + if (nh->nlmsg_type != + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET) && + nh->nlmsg_type != + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET)) + goto error; + while (off < nh->nlmsg_len) { + struct nlattr *na = (void *)((uintptr_t)nh + off); + void *payload = (void *)((uintptr_t)na + NLA_HDRLEN); + + if (na->nla_len > nh->nlmsg_len - off) + goto error; + switch (na->nla_type) { + case RDMA_NLDEV_ATTR_DEV_INDEX: + ibindex = *(uint32_t *)payload; + break; + case RDMA_NLDEV_ATTR_DEV_NAME: + if (!strcmp(payload, data->name)) + found = 1; + break; + case RDMA_NLDEV_ATTR_NDEV_INDEX: + ifindex = *(uint32_t *)payload; + break; + default: + break; + } + off += NLA_ALIGN(na->nla_len); + } + if (found) { + data->ibindex = ibindex; + data->ifindex = ifindex; + } + return 0; +error: + rte_errno = EINVAL; + return -rte_errno; +} + +/** + * Get index of network interface associated with some IB device. + * + * This is the only somewhat safe method to avoid resorting to heuristics + * when faced with port representors. Unfortunately it requires at least + * Linux 4.17. + * + * @param nl + * Netlink socket of the RDMA kind (NETLINK_RDMA). + * @param[in] name + * IB device name. + * + * @return + * A valid (nonzero) interface index on success, 0 otherwise and rte_errno + * is set. + */ +unsigned int +mlx5_nl_ifindex(int nl, const char *name) +{ + static const uint32_t pindex = 1; + uint32_t seq = random(); + struct mlx5_nl_ifindex_data data = { + .name = name, + .ibindex = 0, /* Determined during first pass. */ + .ifindex = 0, /* Determined during second pass. */ + }; + union { + struct nlmsghdr nh; + uint8_t buf[NLMSG_HDRLEN + + NLA_HDRLEN + NLA_ALIGN(sizeof(data.ibindex)) + + NLA_HDRLEN + NLA_ALIGN(sizeof(pindex))]; + } req = { + .nh = { + .nlmsg_len = NLMSG_LENGTH(0), + .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NLDEV_CMD_GET), + .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP, + }, + }; + struct nlattr *na; + int ret; + + ret = mlx5_nl_send(nl, &req.nh, seq); + if (ret < 0) + return 0; + ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data); + if (ret < 0) + return 0; + if (!data.ibindex) + goto error; + ++seq; + req.nh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NLDEV_CMD_PORT_GET); + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.buf) - NLMSG_HDRLEN); + na = (void *)((uintptr_t)req.buf + NLMSG_HDRLEN); + na->nla_len = NLA_HDRLEN + sizeof(data.ibindex); + na->nla_type = RDMA_NLDEV_ATTR_DEV_INDEX; + memcpy((void *)((uintptr_t)na + NLA_HDRLEN), + &data.ibindex, sizeof(data.ibindex)); + na = (void *)((uintptr_t)na + NLA_ALIGN(na->nla_len)); + na->nla_len = NLA_HDRLEN + sizeof(pindex); + na->nla_type = RDMA_NLDEV_ATTR_PORT_INDEX; + memcpy((void *)((uintptr_t)na + NLA_HDRLEN), + &pindex, sizeof(pindex)); + ret = mlx5_nl_send(nl, &req.nh, seq); + if (ret < 0) + return 0; + ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data); + if (ret < 0) + return 0; + if (!data.ifindex) + goto error; + return data.ifindex; +error: + rte_errno = ENODEV; + return 0; +} + +/** + * Process switch information from Netlink message. + * + * @param nh + * Pointer to Netlink message header. + * @param arg + * Opaque data pointer for this callback. 
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg) +{ + struct mlx5_switch_info info = { + .master = 0, + .representor = 0, + .port_name = 0, + .switch_id = 0, + }; + size_t off = NLMSG_LENGTH(sizeof(struct ifinfomsg)); + bool port_name_set = false; + bool switch_id_set = false; + + if (nh->nlmsg_type != RTM_NEWLINK) + goto error; + while (off < nh->nlmsg_len) { + struct rtattr *ra = (void *)((uintptr_t)nh + off); + void *payload = RTA_DATA(ra); + char *end; + unsigned int i; + + if (ra->rta_len > nh->nlmsg_len - off) + goto error; + switch (ra->rta_type) { + case IFLA_PHYS_PORT_NAME: + errno = 0; + info.port_name = strtol(payload, &end, 0); + if (errno || + (size_t)(end - (char *)payload) != strlen(payload)) + goto error; + port_name_set = true; + break; + case IFLA_PHYS_SWITCH_ID: + info.switch_id = 0; + for (i = 0; i < RTA_PAYLOAD(ra); ++i) { + info.switch_id <<= 8; + info.switch_id |= ((uint8_t *)payload)[i]; + } + switch_id_set = true; + break; + } + off += RTA_ALIGN(ra->rta_len); + } + info.master = switch_id_set && !port_name_set; + info.representor = switch_id_set && port_name_set; + memcpy(arg, &info, sizeof(info)); + return 0; +error: + rte_errno = EINVAL; + return -rte_errno; +} + +/** + * Get switch information associated with network interface. + * + * @param nl + * Netlink socket of the ROUTE kind (NETLINK_ROUTE). + * @param ifindex + * Network interface index. + * @param[out] info + * Switch information object, populated in case of success. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_switch_info(int nl, unsigned int ifindex, struct mlx5_switch_info *info) +{ + uint32_t seq = random(); + struct { + struct nlmsghdr nh; + struct ifinfomsg info; + } req = { + .nh = { + .nlmsg_len = NLMSG_LENGTH(sizeof(req.info)), + .nlmsg_type = RTM_GETLINK, + .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK, + }, + .info = { + .ifi_family = AF_UNSPEC, + .ifi_index = ifindex, + }, + }; + int ret; + + ret = mlx5_nl_send(nl, &req.nh, seq); + if (ret >= 0) + ret = mlx5_nl_recv(nl, seq, mlx5_nl_switch_info_cb, info); + return ret; +} diff --git a/drivers/net/mlx5/mlx5_nl_flow.c b/drivers/net/mlx5/mlx5_nl_flow.c new file mode 100644 index 00000000..a1c8c340 --- /dev/null +++ b/drivers/net/mlx5/mlx5_nl_flow.c @@ -0,0 +1,1248 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. 
+ * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include <assert.h> +#include <errno.h> +#include <libmnl/libmnl.h> +#include <linux/if_ether.h> +#include <linux/netlink.h> +#include <linux/pkt_cls.h> +#include <linux/pkt_sched.h> +#include <linux/rtnetlink.h> +#include <linux/tc_act/tc_gact.h> +#include <linux/tc_act/tc_mirred.h> +#include <netinet/in.h> +#include <stdalign.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> +#include <stdlib.h> +#include <sys/socket.h> + +#include <rte_byteorder.h> +#include <rte_errno.h> +#include <rte_ether.h> +#include <rte_flow.h> + +#include "mlx5.h" +#include "mlx5_autoconf.h" + +#ifdef HAVE_TC_ACT_VLAN + +#include <linux/tc_act/tc_vlan.h> + +#else /* HAVE_TC_ACT_VLAN */ + +#define TCA_VLAN_ACT_POP 1 +#define TCA_VLAN_ACT_PUSH 2 +#define TCA_VLAN_ACT_MODIFY 3 +#define TCA_VLAN_PARMS 2 +#define TCA_VLAN_PUSH_VLAN_ID 3 +#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4 +#define TCA_VLAN_PAD 5 +#define TCA_VLAN_PUSH_VLAN_PRIORITY 6 + +struct tc_vlan { + tc_gen; + int v_action; +}; + +#endif /* HAVE_TC_ACT_VLAN */ + +/* Normally found in linux/netlink.h. */ +#ifndef NETLINK_CAP_ACK +#define NETLINK_CAP_ACK 10 +#endif + +/* Normally found in linux/pkt_sched.h. */ +#ifndef TC_H_MIN_INGRESS +#define TC_H_MIN_INGRESS 0xfff2u +#endif + +/* Normally found in linux/pkt_cls.h. */ +#ifndef TCA_CLS_FLAGS_SKIP_SW +#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) +#endif +#ifndef HAVE_TCA_FLOWER_ACT +#define TCA_FLOWER_ACT 3 +#endif +#ifndef HAVE_TCA_FLOWER_FLAGS +#define TCA_FLOWER_FLAGS 22 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE +#define TCA_FLOWER_KEY_ETH_TYPE 8 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST +#define TCA_FLOWER_KEY_ETH_DST 4 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK +#define TCA_FLOWER_KEY_ETH_DST_MASK 5 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC +#define TCA_FLOWER_KEY_ETH_SRC 6 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK +#define TCA_FLOWER_KEY_ETH_SRC_MASK 7 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO +#define TCA_FLOWER_KEY_IP_PROTO 9 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC +#define TCA_FLOWER_KEY_IPV4_SRC 10 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK +#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST +#define TCA_FLOWER_KEY_IPV4_DST 12 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK +#define TCA_FLOWER_KEY_IPV4_DST_MASK 13 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC +#define TCA_FLOWER_KEY_IPV6_SRC 14 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK +#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST +#define TCA_FLOWER_KEY_IPV6_DST 16 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK +#define TCA_FLOWER_KEY_IPV6_DST_MASK 17 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC +#define TCA_FLOWER_KEY_TCP_SRC 18 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK +#define TCA_FLOWER_KEY_TCP_SRC_MASK 35 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST +#define TCA_FLOWER_KEY_TCP_DST 19 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK +#define TCA_FLOWER_KEY_TCP_DST_MASK 36 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC +#define TCA_FLOWER_KEY_UDP_SRC 20 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK +#define TCA_FLOWER_KEY_UDP_SRC_MASK 37 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST +#define TCA_FLOWER_KEY_UDP_DST 21 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK +#define TCA_FLOWER_KEY_UDP_DST_MASK 38 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID +#define TCA_FLOWER_KEY_VLAN_ID 23 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO +#define 
TCA_FLOWER_KEY_VLAN_PRIO 24 +#endif +#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE +#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25 +#endif + +/** Parser state definitions for mlx5_nl_flow_trans[]. */ +enum mlx5_nl_flow_trans { + INVALID, + BACK, + ATTR, + PATTERN, + ITEM_VOID, + ITEM_PORT_ID, + ITEM_ETH, + ITEM_VLAN, + ITEM_IPV4, + ITEM_IPV6, + ITEM_TCP, + ITEM_UDP, + ACTIONS, + ACTION_VOID, + ACTION_PORT_ID, + ACTION_DROP, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + END, +}; + +#define TRANS(...) (const enum mlx5_nl_flow_trans []){ __VA_ARGS__, INVALID, } + +#define PATTERN_COMMON \ + ITEM_VOID, ITEM_PORT_ID, ACTIONS +#define ACTIONS_COMMON \ + ACTION_VOID, ACTION_OF_POP_VLAN, ACTION_OF_PUSH_VLAN, \ + ACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP +#define ACTIONS_FATE \ + ACTION_PORT_ID, ACTION_DROP + +/** Parser state transitions used by mlx5_nl_flow_transpose(). */ +static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = { + [INVALID] = NULL, + [BACK] = NULL, + [ATTR] = TRANS(PATTERN), + [PATTERN] = TRANS(ITEM_ETH, PATTERN_COMMON), + [ITEM_VOID] = TRANS(BACK), + [ITEM_PORT_ID] = TRANS(BACK), + [ITEM_ETH] = TRANS(ITEM_IPV4, ITEM_IPV6, ITEM_VLAN, PATTERN_COMMON), + [ITEM_VLAN] = TRANS(ITEM_IPV4, ITEM_IPV6, PATTERN_COMMON), + [ITEM_IPV4] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON), + [ITEM_IPV6] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON), + [ITEM_TCP] = TRANS(PATTERN_COMMON), + [ITEM_UDP] = TRANS(PATTERN_COMMON), + [ACTIONS] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_VOID] = TRANS(BACK), + [ACTION_PORT_ID] = TRANS(ACTION_VOID, END), + [ACTION_DROP] = TRANS(ACTION_VOID, END), + [ACTION_OF_POP_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_OF_PUSH_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_OF_SET_VLAN_VID] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [ACTION_OF_SET_VLAN_PCP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON), + [END] = NULL, +}; + +/** Empty masks for known item types. */ +static const union { + struct rte_flow_item_port_id port_id; + struct rte_flow_item_eth eth; + struct rte_flow_item_vlan vlan; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_udp udp; +} mlx5_nl_flow_mask_empty; + +/** Supported masks for known item types. */ +static const struct { + struct rte_flow_item_port_id port_id; + struct rte_flow_item_eth eth; + struct rte_flow_item_vlan vlan; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_udp udp; +} mlx5_nl_flow_mask_supported = { + .port_id = { + .id = 0xffffffff, + }, + .eth = { + .type = RTE_BE16(0xffff), + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + }, + .vlan = { + /* PCP and VID only, no DEI. */ + .tci = RTE_BE16(0xefff), + .inner_type = RTE_BE16(0xffff), + }, + .ipv4.hdr = { + .next_proto_id = 0xff, + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + }, + .ipv6.hdr = { + .proto = 0xff, + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + }, + .tcp.hdr = { + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), + }, + .udp.hdr = { + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), + }, +}; + +/** + * Retrieve mask for pattern item. 
+ * + * This function does basic sanity checks on a pattern item in order to + * return the most appropriate mask for it. + * + * @param[in] item + * Item specification. + * @param[in] mask_default + * Default mask for pattern item as specified by the flow API. + * @param[in] mask_supported + * Mask fields supported by the implementation. + * @param[in] mask_empty + * Empty mask to return when there is no specification. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * Either @p item->mask or one of the mask parameters on success, NULL + * otherwise and rte_errno is set. + */ +static const void * +mlx5_nl_flow_item_mask(const struct rte_flow_item *item, + const void *mask_default, + const void *mask_supported, + const void *mask_empty, + size_t mask_size, + struct rte_flow_error *error) +{ + const uint8_t *mask; + size_t i; + + /* item->last and item->mask cannot exist without item->spec. */ + if (!item->spec && (item->mask || item->last)) { + rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, + "\"mask\" or \"last\" field provided without a" + " corresponding \"spec\""); + return NULL; + } + /* No spec, no mask, no problem. */ + if (!item->spec) + return mask_empty; + mask = item->mask ? item->mask : mask_default; + assert(mask); + /* + * Single-pass check to make sure that: + * - Mask is supported, no bits are set outside mask_supported. + * - Both item->spec and item->last are included in mask. + */ + for (i = 0; i != mask_size; ++i) { + if (!mask[i]) + continue; + if ((mask[i] | ((const uint8_t *)mask_supported)[i]) != + ((const uint8_t *)mask_supported)[i]) { + rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask, "unsupported field found in \"mask\""); + return NULL; + } + if (item->last && + (((const uint8_t *)item->spec)[i] & mask[i]) != + (((const uint8_t *)item->last)[i] & mask[i])) { + rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST, + item->last, + "range between \"spec\" and \"last\" not" + " comprised in \"mask\""); + return NULL; + } + } + return mask; +} + +/** + * Transpose flow rule description to rtnetlink message. + * + * This function transposes a flow rule description to a traffic control + * (TC) filter creation message ready to be sent over Netlink. + * + * Target interface is specified as the first entry of the @p ptoi table. + * Subsequent entries enable this function to resolve other DPDK port IDs + * found in the flow rule. + * + * @param[out] buf + * Output message buffer. May be NULL when @p size is 0. + * @param size + * Size of @p buf. Message may be truncated if not large enough. + * @param[in] ptoi + * DPDK port ID to network interface index translation table. This table + * is terminated by an entry with a zero ifindex value. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification. + * @param[in] actions + * Associated actions. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A positive value representing the exact size of the message in bytes + * regardless of the @p size parameter on success, a negative errno value + * otherwise and rte_errno is set. 
+ */ +int +mlx5_nl_flow_transpose(void *buf, + size_t size, + const struct mlx5_nl_flow_ptoi *ptoi, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + alignas(struct nlmsghdr) + uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)]; + const struct rte_flow_item *item; + const struct rte_flow_action *action; + unsigned int n; + uint32_t act_index_cur; + bool in_port_id_set; + bool eth_type_set; + bool vlan_present; + bool vlan_eth_type_set; + bool ip_proto_set; + struct nlattr *na_flower; + struct nlattr *na_flower_act; + struct nlattr *na_vlan_id; + struct nlattr *na_vlan_priority; + const enum mlx5_nl_flow_trans *trans; + const enum mlx5_nl_flow_trans *back; + + if (!size) + goto error_nobufs; +init: + item = pattern; + action = actions; + n = 0; + act_index_cur = 0; + in_port_id_set = false; + eth_type_set = false; + vlan_present = false; + vlan_eth_type_set = false; + ip_proto_set = false; + na_flower = NULL; + na_flower_act = NULL; + na_vlan_id = NULL; + na_vlan_priority = NULL; + trans = TRANS(ATTR); + back = trans; +trans: + switch (trans[n++]) { + union { + const struct rte_flow_item_port_id *port_id; + const struct rte_flow_item_eth *eth; + const struct rte_flow_item_vlan *vlan; + const struct rte_flow_item_ipv4 *ipv4; + const struct rte_flow_item_ipv6 *ipv6; + const struct rte_flow_item_tcp *tcp; + const struct rte_flow_item_udp *udp; + } spec, mask; + union { + const struct rte_flow_action_port_id *port_id; + const struct rte_flow_action_of_push_vlan *of_push_vlan; + const struct rte_flow_action_of_set_vlan_vid * + of_set_vlan_vid; + const struct rte_flow_action_of_set_vlan_pcp * + of_set_vlan_pcp; + } conf; + struct nlmsghdr *nlh; + struct tcmsg *tcm; + struct nlattr *act_index; + struct nlattr *act; + unsigned int i; + + case INVALID: + if (item->type) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, "unsupported pattern item combination"); + else if (action->type) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + action, "unsupported action combination"); + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "flow rule lacks some kind of fate action"); + case BACK: + trans = back; + n = 0; + goto trans; + case ATTR: + /* + * Supported attributes: no groups, some priorities and + * ingress only. Don't care about transfer as it is the + * caller's problem. + */ + if (attr->group) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "groups are not supported"); + if (attr->priority > 0xfffe) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "lowest priority level is 0xfffe"); + if (!attr->ingress) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "only ingress is supported"); + if (attr->egress) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "egress is not supported"); + if (size < mnl_nlmsg_size(sizeof(*tcm))) + goto error_nobufs; + nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = 0; + nlh->nlmsg_flags = 0; + nlh->nlmsg_seq = 0; + tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm)); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm_ifindex = ptoi[0].ifindex; + /* + * Let kernel pick a handle by default. A predictable handle + * can be set by the caller on the resulting buffer through + * mlx5_nl_flow_brand(). 
+ */ + tcm->tcm_handle = 0; + tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS); + /* + * Priority cannot be zero to prevent the kernel from + * picking one automatically. + */ + tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16, + RTE_BE16(ETH_P_ALL)); + break; + case PATTERN: + if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower")) + goto error_nobufs; + na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS); + if (!na_flower) + goto error_nobufs; + if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS, + TCA_CLS_FLAGS_SKIP_SW)) + goto error_nobufs; + break; + case ITEM_VOID: + if (item->type != RTE_FLOW_ITEM_TYPE_VOID) + goto trans; + ++item; + break; + case ITEM_PORT_ID: + if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID) + goto trans; + mask.port_id = mlx5_nl_flow_item_mask + (item, &rte_flow_item_port_id_mask, + &mlx5_nl_flow_mask_supported.port_id, + &mlx5_nl_flow_mask_empty.port_id, + sizeof(mlx5_nl_flow_mask_supported.port_id), error); + if (!mask.port_id) + return -rte_errno; + if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) { + in_port_id_set = 1; + ++item; + break; + } + spec.port_id = item->spec; + if (mask.port_id->id && mask.port_id->id != 0xffffffff) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.port_id, + "no support for partial mask on" + " \"id\" field"); + if (!mask.port_id->id) + i = 0; + else + for (i = 0; ptoi[i].ifindex; ++i) + if (ptoi[i].port_id == spec.port_id->id) + break; + if (!ptoi[i].ifindex) + return rte_flow_error_set + (error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + spec.port_id, + "missing data to convert port ID to ifindex"); + tcm = mnl_nlmsg_get_payload(buf); + if (in_port_id_set && + ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + spec.port_id, + "cannot match traffic for several port IDs" + " through a single flow rule"); + tcm->tcm_ifindex = ptoi[i].ifindex; + in_port_id_set = 1; + ++item; + break; + case ITEM_ETH: + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) + goto trans; + mask.eth = mlx5_nl_flow_item_mask + (item, &rte_flow_item_eth_mask, + &mlx5_nl_flow_mask_supported.eth, + &mlx5_nl_flow_mask_empty.eth, + sizeof(mlx5_nl_flow_mask_supported.eth), error); + if (!mask.eth) + return -rte_errno; + if (mask.eth == &mlx5_nl_flow_mask_empty.eth) { + ++item; + break; + } + spec.eth = item->spec; + if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.eth, + "no support for partial mask on" + " \"type\" field"); + if (mask.eth->type) { + if (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_ETH_TYPE, + spec.eth->type)) + goto error_nobufs; + eth_type_set = 1; + } + if ((!is_zero_ether_addr(&mask.eth->dst) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_DST, + ETHER_ADDR_LEN, + spec.eth->dst.addr_bytes) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_DST_MASK, + ETHER_ADDR_LEN, + mask.eth->dst.addr_bytes))) || + (!is_zero_ether_addr(&mask.eth->src) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_SRC, + ETHER_ADDR_LEN, + spec.eth->src.addr_bytes) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_ETH_SRC_MASK, + ETHER_ADDR_LEN, + mask.eth->src.addr_bytes)))) + goto error_nobufs; + ++item; + break; + case ITEM_VLAN: + if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) + goto trans; + mask.vlan = mlx5_nl_flow_item_mask + (item, &rte_flow_item_vlan_mask, + &mlx5_nl_flow_mask_supported.vlan, + 
&mlx5_nl_flow_mask_empty.vlan, + sizeof(mlx5_nl_flow_mask_supported.vlan), error); + if (!mask.vlan) + return -rte_errno; + if (!eth_type_set && + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_ETH_TYPE, + RTE_BE16(ETH_P_8021Q))) + goto error_nobufs; + eth_type_set = 1; + vlan_present = 1; + if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) { + ++item; + break; + } + spec.vlan = item->spec; + if ((mask.vlan->tci & RTE_BE16(0xe000) && + (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) || + (mask.vlan->tci & RTE_BE16(0x0fff) && + (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) || + (mask.vlan->inner_type && + mask.vlan->inner_type != RTE_BE16(0xffff))) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.vlan, + "no support for partial masks on" + " \"tci\" (PCP and VID parts) and" + " \"inner_type\" fields"); + if (mask.vlan->inner_type) { + if (!mnl_attr_put_u16_check + (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE, + spec.vlan->inner_type)) + goto error_nobufs; + vlan_eth_type_set = 1; + } + if ((mask.vlan->tci & RTE_BE16(0xe000) && + !mnl_attr_put_u8_check + (buf, size, TCA_FLOWER_KEY_VLAN_PRIO, + (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) || + (mask.vlan->tci & RTE_BE16(0x0fff) && + !mnl_attr_put_u16_check + (buf, size, TCA_FLOWER_KEY_VLAN_ID, + rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff))))) + goto error_nobufs; + ++item; + break; + case ITEM_IPV4: + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) + goto trans; + mask.ipv4 = mlx5_nl_flow_item_mask + (item, &rte_flow_item_ipv4_mask, + &mlx5_nl_flow_mask_supported.ipv4, + &mlx5_nl_flow_mask_empty.ipv4, + sizeof(mlx5_nl_flow_mask_supported.ipv4), error); + if (!mask.ipv4) + return -rte_errno; + if ((!eth_type_set || !vlan_eth_type_set) && + !mnl_attr_put_u16_check(buf, size, + vlan_present ? + TCA_FLOWER_KEY_VLAN_ETH_TYPE : + TCA_FLOWER_KEY_ETH_TYPE, + RTE_BE16(ETH_P_IP))) + goto error_nobufs; + eth_type_set = 1; + vlan_eth_type_set = 1; + if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) { + ++item; + break; + } + spec.ipv4 = item->spec; + if (mask.ipv4->hdr.next_proto_id && + mask.ipv4->hdr.next_proto_id != 0xff) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.ipv4, + "no support for partial mask on" + " \"hdr.next_proto_id\" field"); + if (mask.ipv4->hdr.next_proto_id) { + if (!mnl_attr_put_u8_check + (buf, size, TCA_FLOWER_KEY_IP_PROTO, + spec.ipv4->hdr.next_proto_id)) + goto error_nobufs; + ip_proto_set = 1; + } + if ((mask.ipv4->hdr.src_addr && + (!mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_SRC, + spec.ipv4->hdr.src_addr) || + !mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_SRC_MASK, + mask.ipv4->hdr.src_addr))) || + (mask.ipv4->hdr.dst_addr && + (!mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_DST, + spec.ipv4->hdr.dst_addr) || + !mnl_attr_put_u32_check(buf, size, + TCA_FLOWER_KEY_IPV4_DST_MASK, + mask.ipv4->hdr.dst_addr)))) + goto error_nobufs; + ++item; + break; + case ITEM_IPV6: + if (item->type != RTE_FLOW_ITEM_TYPE_IPV6) + goto trans; + mask.ipv6 = mlx5_nl_flow_item_mask + (item, &rte_flow_item_ipv6_mask, + &mlx5_nl_flow_mask_supported.ipv6, + &mlx5_nl_flow_mask_empty.ipv6, + sizeof(mlx5_nl_flow_mask_supported.ipv6), error); + if (!mask.ipv6) + return -rte_errno; + if ((!eth_type_set || !vlan_eth_type_set) && + !mnl_attr_put_u16_check(buf, size, + vlan_present ? 
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE : + TCA_FLOWER_KEY_ETH_TYPE, + RTE_BE16(ETH_P_IPV6))) + goto error_nobufs; + eth_type_set = 1; + vlan_eth_type_set = 1; + if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) { + ++item; + break; + } + spec.ipv6 = item->spec; + if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.ipv6, + "no support for partial mask on" + " \"hdr.proto\" field"); + if (mask.ipv6->hdr.proto) { + if (!mnl_attr_put_u8_check + (buf, size, TCA_FLOWER_KEY_IP_PROTO, + spec.ipv6->hdr.proto)) + goto error_nobufs; + ip_proto_set = 1; + } + if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_SRC, + sizeof(spec.ipv6->hdr.src_addr), + spec.ipv6->hdr.src_addr) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_SRC_MASK, + sizeof(mask.ipv6->hdr.src_addr), + mask.ipv6->hdr.src_addr))) || + (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr) && + (!mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_DST, + sizeof(spec.ipv6->hdr.dst_addr), + spec.ipv6->hdr.dst_addr) || + !mnl_attr_put_check(buf, size, + TCA_FLOWER_KEY_IPV6_DST_MASK, + sizeof(mask.ipv6->hdr.dst_addr), + mask.ipv6->hdr.dst_addr)))) + goto error_nobufs; + ++item; + break; + case ITEM_TCP: + if (item->type != RTE_FLOW_ITEM_TYPE_TCP) + goto trans; + mask.tcp = mlx5_nl_flow_item_mask + (item, &rte_flow_item_tcp_mask, + &mlx5_nl_flow_mask_supported.tcp, + &mlx5_nl_flow_mask_empty.tcp, + sizeof(mlx5_nl_flow_mask_supported.tcp), error); + if (!mask.tcp) + return -rte_errno; + if (!ip_proto_set && + !mnl_attr_put_u8_check(buf, size, + TCA_FLOWER_KEY_IP_PROTO, + IPPROTO_TCP)) + goto error_nobufs; + if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) { + ++item; + break; + } + spec.tcp = item->spec; + if ((mask.tcp->hdr.src_port && + mask.tcp->hdr.src_port != RTE_BE16(0xffff)) || + (mask.tcp->hdr.dst_port && + mask.tcp->hdr.dst_port != RTE_BE16(0xffff))) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.tcp, + "no support for partial masks on" + " \"hdr.src_port\" and \"hdr.dst_port\"" + " fields"); + if ((mask.tcp->hdr.src_port && + (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_SRC, + spec.tcp->hdr.src_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_SRC_MASK, + mask.tcp->hdr.src_port))) || + (mask.tcp->hdr.dst_port && + (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_DST, + spec.tcp->hdr.dst_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_TCP_DST_MASK, + mask.tcp->hdr.dst_port)))) + goto error_nobufs; + ++item; + break; + case ITEM_UDP: + if (item->type != RTE_FLOW_ITEM_TYPE_UDP) + goto trans; + mask.udp = mlx5_nl_flow_item_mask + (item, &rte_flow_item_udp_mask, + &mlx5_nl_flow_mask_supported.udp, + &mlx5_nl_flow_mask_empty.udp, + sizeof(mlx5_nl_flow_mask_supported.udp), error); + if (!mask.udp) + return -rte_errno; + if (!ip_proto_set && + !mnl_attr_put_u8_check(buf, size, + TCA_FLOWER_KEY_IP_PROTO, + IPPROTO_UDP)) + goto error_nobufs; + if (mask.udp == &mlx5_nl_flow_mask_empty.udp) { + ++item; + break; + } + spec.udp = item->spec; + if ((mask.udp->hdr.src_port && + mask.udp->hdr.src_port != RTE_BE16(0xffff)) || + (mask.udp->hdr.dst_port && + mask.udp->hdr.dst_port != RTE_BE16(0xffff))) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask.udp, + "no support for partial masks on" + " \"hdr.src_port\" and \"hdr.dst_port\"" + " fields"); + if ((mask.udp->hdr.src_port && + 
(!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_SRC, + spec.udp->hdr.src_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_SRC_MASK, + mask.udp->hdr.src_port))) || + (mask.udp->hdr.dst_port && + (!mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_DST, + spec.udp->hdr.dst_port) || + !mnl_attr_put_u16_check(buf, size, + TCA_FLOWER_KEY_UDP_DST_MASK, + mask.udp->hdr.dst_port)))) + goto error_nobufs; + ++item; + break; + case ACTIONS: + if (item->type != RTE_FLOW_ITEM_TYPE_END) + goto trans; + assert(na_flower); + assert(!na_flower_act); + na_flower_act = + mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT); + if (!na_flower_act) + goto error_nobufs; + act_index_cur = 1; + break; + case ACTION_VOID: + if (action->type != RTE_FLOW_ACTION_TYPE_VOID) + goto trans; + ++action; + break; + case ACTION_PORT_ID: + if (action->type != RTE_FLOW_ACTION_TYPE_PORT_ID) + goto trans; + conf.port_id = action->conf; + if (conf.port_id->original) + i = 0; + else + for (i = 0; ptoi[i].ifindex; ++i) + if (ptoi[i].port_id == conf.port_id->id) + break; + if (!ptoi[i].ifindex) + return rte_flow_error_set + (error, ENODEV, RTE_FLOW_ERROR_TYPE_ACTION_CONF, + conf.port_id, + "missing data to convert port ID to ifindex"); + act_index = + mnl_attr_nest_start_check(buf, size, act_index_cur++); + if (!act_index || + !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred")) + goto error_nobufs; + act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS); + if (!act) + goto error_nobufs; + if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS, + sizeof(struct tc_mirred), + &(struct tc_mirred){ + .action = TC_ACT_STOLEN, + .eaction = TCA_EGRESS_REDIR, + .ifindex = ptoi[i].ifindex, + })) + goto error_nobufs; + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + ++action; + break; + case ACTION_DROP: + if (action->type != RTE_FLOW_ACTION_TYPE_DROP) + goto trans; + act_index = + mnl_attr_nest_start_check(buf, size, act_index_cur++); + if (!act_index || + !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact")) + goto error_nobufs; + act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS); + if (!act) + goto error_nobufs; + if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS, + sizeof(struct tc_gact), + &(struct tc_gact){ + .action = TC_ACT_SHOT, + })) + goto error_nobufs; + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + ++action; + break; + case ACTION_OF_POP_VLAN: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_POP_VLAN) + goto trans; + conf.of_push_vlan = NULL; + i = TCA_VLAN_ACT_POP; + goto action_of_vlan; + case ACTION_OF_PUSH_VLAN: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) + goto trans; + conf.of_push_vlan = action->conf; + i = TCA_VLAN_ACT_PUSH; + goto action_of_vlan; + case ACTION_OF_SET_VLAN_VID: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) + goto trans; + conf.of_set_vlan_vid = action->conf; + if (na_vlan_id) + goto override_na_vlan_id; + i = TCA_VLAN_ACT_MODIFY; + goto action_of_vlan; + case ACTION_OF_SET_VLAN_PCP: + if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) + goto trans; + conf.of_set_vlan_pcp = action->conf; + if (na_vlan_priority) + goto override_na_vlan_priority; + i = TCA_VLAN_ACT_MODIFY; + goto action_of_vlan; +action_of_vlan: + act_index = + mnl_attr_nest_start_check(buf, size, act_index_cur++); + if (!act_index || + !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan")) + goto error_nobufs; + act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS); + if (!act) + goto error_nobufs; + if 
(!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS, + sizeof(struct tc_vlan), + &(struct tc_vlan){ + .action = TC_ACT_PIPE, + .v_action = i, + })) + goto error_nobufs; + if (i == TCA_VLAN_ACT_POP) { + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + ++action; + break; + } + if (i == TCA_VLAN_ACT_PUSH && + !mnl_attr_put_u16_check(buf, size, + TCA_VLAN_PUSH_VLAN_PROTOCOL, + conf.of_push_vlan->ethertype)) + goto error_nobufs; + na_vlan_id = mnl_nlmsg_get_payload_tail(buf); + if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0)) + goto error_nobufs; + na_vlan_priority = mnl_nlmsg_get_payload_tail(buf); + if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0)) + goto error_nobufs; + mnl_attr_nest_end(buf, act); + mnl_attr_nest_end(buf, act_index); + if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) { +override_na_vlan_id: + na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID; + *(uint16_t *)mnl_attr_get_payload(na_vlan_id) = + rte_be_to_cpu_16 + (conf.of_set_vlan_vid->vlan_vid); + } else if (action->type == + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) { +override_na_vlan_priority: + na_vlan_priority->nla_type = + TCA_VLAN_PUSH_VLAN_PRIORITY; + *(uint8_t *)mnl_attr_get_payload(na_vlan_priority) = + conf.of_set_vlan_pcp->vlan_pcp; + } + ++action; + break; + case END: + if (item->type != RTE_FLOW_ITEM_TYPE_END || + action->type != RTE_FLOW_ACTION_TYPE_END) + goto trans; + if (na_flower_act) + mnl_attr_nest_end(buf, na_flower_act); + if (na_flower) + mnl_attr_nest_end(buf, na_flower); + nlh = buf; + return nlh->nlmsg_len; + } + back = trans; + trans = mlx5_nl_flow_trans[trans[n - 1]]; + n = 0; + goto trans; +error_nobufs: + if (buf != buf_tmp) { + buf = buf_tmp; + size = sizeof(buf_tmp); + goto init; + } + return rte_flow_error_set + (error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "generated TC message is too large"); +} + +/** + * Brand rtnetlink buffer with unique handle. + * + * This handle should be unique for a given network interface to avoid + * collisions. + * + * @param buf + * Flow rule buffer previously initialized by mlx5_nl_flow_transpose(). + * @param handle + * Unique 32-bit handle to use. + */ +void +mlx5_nl_flow_brand(void *buf, uint32_t handle) +{ + struct tcmsg *tcm = mnl_nlmsg_get_payload(buf); + + tcm->tcm_handle = handle; +} + +/** + * Send Netlink message with acknowledgment. + * + * @param nl + * Libmnl socket to use. + * @param nlh + * Message to send. This function always raises the NLM_F_ACK flag before + * sending. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_flow_nl_ack(struct mnl_socket *nl, struct nlmsghdr *nlh) +{ + alignas(struct nlmsghdr) + uint8_t ans[mnl_nlmsg_size(sizeof(struct nlmsgerr)) + + nlh->nlmsg_len - sizeof(*nlh)]; + uint32_t seq = random(); + int ret; + + nlh->nlmsg_flags |= NLM_F_ACK; + nlh->nlmsg_seq = seq; + ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len); + if (ret != -1) + ret = mnl_socket_recvfrom(nl, ans, sizeof(ans)); + if (ret != -1) + ret = mnl_cb_run + (ans, ret, seq, mnl_socket_get_portid(nl), NULL, NULL); + if (!ret) + return 0; + rte_errno = errno; + return -rte_errno; +} + +/** + * Create a Netlink flow rule. + * + * @param nl + * Libmnl socket to use. + * @param buf + * Flow rule buffer previously initialized by mlx5_nl_flow_transpose(). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_nl_flow_create(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error) +{ + struct nlmsghdr *nlh = buf; + + nlh->nlmsg_type = RTM_NEWTFILTER; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; + if (!mlx5_nl_flow_nl_ack(nl, nlh)) + return 0; + return rte_flow_error_set + (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "netlink: failed to create TC flow rule"); +} + +/** + * Destroy a Netlink flow rule. + * + * @param nl + * Libmnl socket to use. + * @param buf + * Flow rule buffer previously initialized by mlx5_nl_flow_transpose(). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf, + struct rte_flow_error *error) +{ + struct nlmsghdr *nlh = buf; + + nlh->nlmsg_type = RTM_DELTFILTER; + nlh->nlmsg_flags = NLM_F_REQUEST; + if (!mlx5_nl_flow_nl_ack(nl, nlh)) + return 0; + return rte_flow_error_set + (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "netlink: failed to destroy TC flow rule"); +} + +/** + * Initialize ingress qdisc of a given network interface. + * + * @param nl + * Libmnl socket of the @p NETLINK_ROUTE kind. + * @param ifindex + * Index of network interface to initialize. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex, + struct rte_flow_error *error) +{ + struct nlmsghdr *nlh; + struct tcmsg *tcm; + alignas(struct nlmsghdr) + uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)]; + + /* Destroy existing ingress qdisc and everything attached to it. */ + nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = RTM_DELQDISC; + nlh->nlmsg_flags = NLM_F_REQUEST; + tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm)); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm_ifindex = ifindex; + tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0); + tcm->tcm_parent = TC_H_INGRESS; + /* Ignore errors when qdisc is already absent. */ + if (mlx5_nl_flow_nl_ack(nl, nlh) && + rte_errno != EINVAL && rte_errno != ENOENT) + return rte_flow_error_set + (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "netlink: failed to remove ingress qdisc"); + /* Create fresh ingress qdisc. */ + nlh = mnl_nlmsg_put_header(buf); + nlh->nlmsg_type = RTM_NEWQDISC; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; + tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm)); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm_ifindex = ifindex; + tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0); + tcm->tcm_parent = TC_H_INGRESS; + mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress"); + if (mlx5_nl_flow_nl_ack(nl, nlh)) + return rte_flow_error_set + (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "netlink: failed to create ingress qdisc"); + return 0; +} + +/** + * Create and configure a libmnl socket for Netlink flow rules. + * + * @return + * A valid libmnl socket object pointer on success, NULL otherwise and + * rte_errno is set. 
+ */ +struct mnl_socket * +mlx5_nl_flow_socket_create(void) +{ + struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE); + + if (nl) { + mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 }, + sizeof(int)); + if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID)) + return nl; + } + rte_errno = errno; + if (nl) + mnl_socket_close(nl); + return NULL; +} + +/** + * Destroy a libmnl socket. + */ +void +mlx5_nl_flow_socket_destroy(struct mnl_socket *nl) +{ + mnl_socket_close(nl); +} diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 0cf370cd..0870d32f 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -21,6 +21,9 @@ #include <rte_vect.h> #include "mlx5_autoconf.h" +/* RSS hash key size. */ +#define MLX5_RSS_HASH_KEY_LEN 40 + /* Get CQE owner bit. */ #define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK) @@ -240,7 +243,9 @@ struct mlx5_cqe { uint8_t padding[64]; #endif uint8_t pkt_info; - uint8_t rsvd0[11]; + uint8_t rsvd0; + uint16_t wqe_id; + uint8_t rsvd3[8]; uint32_t rx_hash_res; uint8_t rx_hash_type; uint8_t rsvd1[11]; @@ -285,7 +290,10 @@ struct mlx5_cqe { struct mlx5_mini_cqe8 { union { uint32_t rx_hash_result; - uint32_t checksum; + struct { + uint16_t checksum; + uint16_t stride_idx; + }; struct { uint16_t wqe_counter; uint8_t s_wqe_opcode; diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index d69b4c09..b95778a8 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -50,10 +50,11 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev, return -rte_errno; } if (rss_conf->rss_key && rss_conf->rss_key_len) { - if (rss_conf->rss_key_len != rss_hash_default_key_len) { + if (rss_conf->rss_key_len != MLX5_RSS_HASH_KEY_LEN) { DRV_LOG(ERR, - "port %u RSS key len must be %zu Bytes long", - dev->data->port_id, rss_hash_default_key_len); + "port %u RSS key len must be %s Bytes long", + dev->data->port_id, + RTE_STR(MLX5_RSS_HASH_KEY_LEN)); rte_errno = EINVAL; return -rte_errno; } diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 80824bc4..e74fdef8 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -32,10 +32,18 @@ void mlx5_promiscuous_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret; dev->data->promiscuous = 1; - if (((struct priv *)dev->data->dev_private)->config.vf) + if (priv->isolated) { + DRV_LOG(WARNING, + "port %u cannot enable promiscuous mode" + " in flow isolation mode", + dev->data->port_id); + return; + } + if (priv->config.vf) mlx5_nl_promisc(dev, 1); ret = mlx5_traffic_restart(dev); if (ret) @@ -52,10 +60,11 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev) void mlx5_promiscuous_disable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret; dev->data->promiscuous = 0; - if (((struct priv *)dev->data->dev_private)->config.vf) + if (priv->config.vf) mlx5_nl_promisc(dev, 0); ret = mlx5_traffic_restart(dev); if (ret) @@ -72,10 +81,18 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev) void mlx5_allmulticast_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret; dev->data->all_multicast = 1; - if (((struct priv *)dev->data->dev_private)->config.vf) + if (priv->isolated) { + DRV_LOG(WARNING, + "port %u cannot enable allmulticast mode" + " in flow isolation mode", + dev->data->port_id); + return; + } + if (priv->config.vf) mlx5_nl_allmulti(dev, 1); ret = mlx5_traffic_restart(dev); if (ret) @@ -92,10 +109,11 @@ 
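A minimal caller-side sketch (not from this patch) of how the mlx5_nl_flow_* helpers added in mlx5_nl_flow.c above are assumed to be chained; the rule buffer is assumed to come from mlx5_nl_flow_transpose() and the prototypes from the driver's private header:

/*
 * Illustrative sketch only; error handling abbreviated.
 */
#include <libmnl/libmnl.h>
#include <rte_errno.h>
#include <rte_flow.h>

static int
tc_flow_offload_example(unsigned int ifindex, void *rule_buf, uint32_t handle)
{
	struct rte_flow_error error;
	struct mnl_socket *nl = mlx5_nl_flow_socket_create();

	if (!nl)
		return -rte_errno;
	/* Flush any stale ingress qdisc and create a fresh one. */
	if (mlx5_nl_flow_init(nl, ifindex, &error))
		goto error;
	/* Handle must be unique for a given network interface. */
	mlx5_nl_flow_brand(rule_buf, handle);
	if (mlx5_nl_flow_create(nl, rule_buf, &error))
		goto error;
	/* ... flow rule is now offloaded through TC flower ... */
	mlx5_nl_flow_destroy(nl, rule_buf, &error);
	mlx5_nl_flow_socket_destroy(nl);
	return 0;
error:
	mlx5_nl_flow_socket_destroy(nl);
	return -rte_errno;
}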
mlx5_allmulticast_enable(struct rte_eth_dev *dev) void mlx5_allmulticast_disable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret; dev->data->all_multicast = 0; - if (((struct priv *)dev->data->dev_private)->config.vf) + if (priv->config.vf) mlx5_nl_allmulti(dev, 0); ret = mlx5_traffic_restart(dev); if (ret) diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index de3f869e..1f7bfd44 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -52,7 +52,9 @@ uint8_t rss_hash_default_key[] = { }; /* Length of the default RSS hash key. */ -const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); +static_assert(MLX5_RSS_HASH_KEY_LEN == + (unsigned int)sizeof(rss_hash_default_key), + "wrong RSS default key size."); /** * Check whether Multi-Packet RQ can be enabled for the device. @@ -386,8 +388,10 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_JUMBO_FRAME); + offloads |= DEV_RX_OFFLOAD_CRC_STRIP; if (config->hw_fcs_strip) - offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + if (config->hw_csum) offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | @@ -643,7 +647,8 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) doorbell = (uint64_t)doorbell_hi << 32; doorbell |= rxq->cqn; rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi); - rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg); + mlx5_uar_write64(rte_cpu_to_be_64(doorbell), + cq_db_reg, rxq->uar_lock_cq); } /** @@ -818,7 +823,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (config->cqe_comp && !rxq_data->hw_timestamp) { attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + attr.cq.mlx5.cqe_comp_res_format = + mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX : + MLX5DV_CQE_RES_FORMAT_HASH; +#else attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; +#endif /* * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. 
@@ -976,7 +987,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) rxq_data->rq_db = rwq.dbrec; rxq_data->cqe_n = log2above(cq_info.cqe_cnt); rxq_data->cq_ci = 0; - rxq_data->strd_ci = 0; + rxq_data->consumed_strd = 0; rxq_data->rq_pi = 0; rxq_data->zip = (struct rxq_zip){ .ai = 0, @@ -993,8 +1004,6 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, idx, (void *)&tmpl); rte_atomic32_inc(&tmpl->refcnt); - DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", - dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; @@ -1036,9 +1045,6 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->ibv) { rte_atomic32_inc(&rxq_ctrl->ibv->refcnt); - DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", - dev->data->port_id, rxq_ctrl->idx, - rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); } return rxq_ctrl->ibv; } @@ -1058,9 +1064,6 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) assert(rxq_ibv); assert(rxq_ibv->wq); assert(rxq_ibv->cq); - DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", - PORT_ID(rxq_ibv->rxq_ctrl->priv), - rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) { rxq_free_elts(rxq_ibv->rxq_ctrl); claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq)); @@ -1231,6 +1234,13 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) */ desc *= 4; obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n; + /* + * rte_mempool_create_empty() has sanity check to refuse large cache + * size compared to the number of elements. + * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a + * constant number 2 instead. + */ + obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2); /* Check a mempool is already allocated and if it can be resued. */ if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) { DRV_LOG(DEBUG, "port %u mempool %s is being reused", @@ -1346,7 +1356,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM; if (mprq_en && - desc >= (1U << config->mprq.stride_num_n) && + desc > (1U << config->mprq.stride_num_n) && mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) { /* TODO: Rx scatter isn't supported yet. */ tmpl->rxq.sges_n = 0; @@ -1401,6 +1411,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, dev->data->dev_conf.rxmode.max_rx_pkt_len, mb_len - RTE_PKTMBUF_HEADROOM); } + if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) + DRV_LOG(WARNING, + "port %u MPRQ is requested but cannot be enabled" + " (requested: desc = %u, stride_sz = %u," + " supported: min_stride_num = %u, max_stride_sz = %u).", + dev->data->port_id, desc, mprq_stride_size, + (1 << config->mprq.stride_num_n), + (1 << config->mprq.max_stride_size_n)); DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { @@ -1419,17 +1437,17 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, /* Configure VLAN stripping. */ tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); /* By default, FCS (CRC) is stripped by hardware. 
*/ - if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) { - tmpl->rxq.crc_present = 0; - } else if (config->hw_fcs_strip) { - tmpl->rxq.crc_present = 1; - } else { - DRV_LOG(WARNING, - "port %u CRC stripping has been disabled but will" - " still be performed by hardware, make sure MLNX_OFED" - " and firmware are up to date", - dev->data->port_id); - tmpl->rxq.crc_present = 0; + tmpl->rxq.crc_present = 0; + if (rte_eth_dev_must_keep_crc(offloads)) { + if (config->hw_fcs_strip) { + tmpl->rxq.crc_present = 1; + } else { + DRV_LOG(WARNING, + "port %u CRC stripping has been disabled but will" + " still be performed by hardware, make sure MLNX_OFED" + " and firmware are up to date", + dev->data->port_id); + } } DRV_LOG(DEBUG, "port %u CRC stripping is %s, %u bytes will be subtracted from" @@ -1447,10 +1465,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, tmpl->rxq.elts_n = log2above(desc); tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); +#ifndef RTE_ARCH_64 + tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq; +#endif tmpl->idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, - idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: @@ -1481,9 +1500,6 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) rxq); mlx5_rxq_ibv_get(dev, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); - DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", - dev->data->port_id, rxq_ctrl->idx, - rte_atomic32_read(&rxq_ctrl->refcnt)); } return rxq_ctrl; } @@ -1511,8 +1527,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) assert(rxq_ctrl->priv); if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv)) rxq_ctrl->ibv = NULL; - DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, - rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh); LIST_REMOVE(rxq_ctrl, next); @@ -1630,14 +1644,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues, } rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); - DEBUG("port %u new indirection table %p: queues:%u refcnt:%d", - dev->data->port_id, (void *)ind_tbl, 1 << wq_n, - rte_atomic32_read(&ind_tbl->refcnt)); return ind_tbl; error: rte_free(ind_tbl); - DRV_LOG(DEBUG, "port %u cannot create indirection table", - dev->data->port_id); + DEBUG("port %u cannot create indirection table", dev->data->port_id); return NULL; } @@ -1672,9 +1682,6 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues, unsigned int i; rte_atomic32_inc(&ind_tbl->refcnt); - DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", - dev->data->port_id, (void *)ind_tbl, - rte_atomic32_read(&ind_tbl->refcnt)); for (i = 0; i != ind_tbl->queues_n; ++i) mlx5_rxq_get(dev, ind_tbl->queues[i]); } @@ -1698,15 +1705,9 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, { unsigned int i; - DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", - dev->data->port_id, (void *)ind_tbl, - rte_atomic32_read(&ind_tbl->refcnt)); - if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) { + if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) claim_zero(mlx5_glue->destroy_rwq_ind_table (ind_tbl->ind_table)); - DEBUG("port %u delete indirection table %p: queues: %u", - dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n); - } for (i = 0; i != ind_tbl->queues_n; ++i) claim_nonzero(mlx5_rxq_release(dev, 
ind_tbl->queues[i])); if (!rte_atomic32_read(&ind_tbl->refcnt)) { @@ -1758,10 +1759,6 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) * first queue index will be taken for the indirection table. * @param queues_n * Number of queues. - * @param tunnel - * Tunnel type, implies tunnel offloading like inner checksum if available. - * @param rss_level - * RSS hash on tunnel level. * * @return * The Verbs object initialised, NULL otherwise and rte_errno is set. @@ -1771,16 +1768,13 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, const uint16_t *queues, uint32_t queues_n, - uint32_t tunnel, uint32_t rss_level) + int tunnel __rte_unused) { struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; int err; -#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - struct mlx5dv_qp_init_attr qp_init_attr = {0}; -#endif queues_n = hash_fields ? queues_n : 1; ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); @@ -1791,15 +1785,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, return NULL; } if (!rss_key_len) { - rss_key_len = rss_hash_default_key_len; + rss_key_len = MLX5_RSS_HASH_KEY_LEN; rss_key = rss_hash_default_key; } #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - if (tunnel) { - qp_init_attr.comp_mask = - MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; - qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS; - } qp = mlx5_glue->dv_create_qp (priv->ctx, &(struct ibv_qp_init_attr_ex){ @@ -1811,25 +1800,20 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, .rx_hash_conf = (struct ibv_rx_hash_conf){ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, .rx_hash_key_len = rss_key_len ? rss_key_len : - rss_hash_default_key_len, + MLX5_RSS_HASH_KEY_LEN, .rx_hash_key = rss_key ? (void *)(uintptr_t)rss_key : rss_hash_default_key, - .rx_hash_fields_mask = hash_fields | - (tunnel && rss_level > 1 ? - (uint32_t)IBV_RX_HASH_INNER : 0), + .rx_hash_fields_mask = hash_fields, }, .rwq_ind_tbl = ind_tbl->ind_table, .pd = priv->pd, }, - &qp_init_attr); - DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64 - " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64 - " create_flags:0x%x", - dev->data->port_id, (void *)qp, (void *)ind_tbl, - (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) | - hash_fields, tunnel, rss_level, - qp_init_attr.comp_mask, qp_init_attr.create_flags); + &(struct mlx5dv_qp_init_attr){ + .comp_mask = tunnel ? + MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0, + .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS, + }); #else qp = mlx5_glue->create_qp_ex (priv->ctx, @@ -1842,7 +1826,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, .rx_hash_conf = (struct ibv_rx_hash_conf){ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, .rx_hash_key_len = rss_key_len ? rss_key_len : - rss_hash_default_key_len, + MLX5_RSS_HASH_KEY_LEN, .rx_hash_key = rss_key ? 
(void *)(uintptr_t)rss_key : rss_hash_default_key, @@ -1851,10 +1835,6 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, .rwq_ind_tbl = ind_tbl->ind_table, .pd = priv->pd, }); - DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64 - " tunnel:0x%x level:%hhu", - dev->data->port_id, (void *)qp, (void *)ind_tbl, - hash_fields, tunnel, rss_level); #endif if (!qp) { rte_errno = errno; @@ -1867,14 +1847,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, hrxq->qp = qp; hrxq->rss_key_len = rss_key_len; hrxq->hash_fields = hash_fields; - hrxq->tunnel = tunnel; - hrxq->rss_level = rss_level; memcpy(hrxq->rss_key, rss_key, rss_key_len); rte_atomic32_inc(&hrxq->refcnt); LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next); - DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", - dev->data->port_id, (void *)hrxq, - rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: err = rte_errno; /* Save rte_errno before cleanup. */ @@ -1897,10 +1872,6 @@ error: * first queue index will be taken for the indirection table. * @param queues_n * Number of queues. - * @param tunnel - * Tunnel type, implies tunnel offloading like inner checksum if available. - * @param rss_level - * RSS hash on tunnel level * * @return * An hash Rx queue on success. @@ -1909,8 +1880,7 @@ struct mlx5_hrxq * mlx5_hrxq_get(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n, - uint32_t tunnel, uint32_t rss_level) + const uint16_t *queues, uint32_t queues_n) { struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; @@ -1925,10 +1895,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, continue; if (hrxq->hash_fields != hash_fields) continue; - if (hrxq->tunnel != tunnel) - continue; - if (hrxq->rss_level != rss_level) - continue; ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) continue; @@ -1937,9 +1903,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, continue; } rte_atomic32_inc(&hrxq->refcnt); - DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", - dev->data->port_id, (void *)hrxq, - rte_atomic32_read(&hrxq->refcnt)); return hrxq; } return NULL; @@ -1959,15 +1922,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) { - DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", - dev->data->port_id, (void *)hrxq, - rte_atomic32_read(&hrxq->refcnt)); if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); - DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:" - " 0x%x, level: %u", - dev->data->port_id, (void *)hrxq, hrxq->hash_fields, - hrxq->tunnel, hrxq->rss_level); mlx5_ind_table_ibv_release(dev, hrxq->ind_table); LIST_REMOVE(hrxq, next); rte_free(hrxq); @@ -2001,3 +1957,235 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev) } return ret; } + +/** + * Create a drop Rx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
+ */ +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct ibv_cq *cq; + struct ibv_wq *wq = NULL; + struct mlx5_rxq_ibv *rxq; + + if (priv->drop_queue.rxq) + return priv->drop_queue.rxq; + cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); + if (!cq) { + DEBUG("port %u cannot allocate CQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + wq = mlx5_glue->create_wq(priv->ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = priv->pd, + .cq = cq, + }); + if (!wq) { + DEBUG("port %u cannot allocate WQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0); + if (!rxq) { + DEBUG("port %u cannot allocate drop Rx queue memory", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + rxq->cq = cq; + rxq->wq = wq; + priv->drop_queue.rxq = rxq; + return rxq; +error: + if (wq) + claim_zero(mlx5_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx5_glue->destroy_cq(cq)); + return NULL; +} + +/** + * Release a drop Rx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +void +mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq; + + if (rxq->wq) + claim_zero(mlx5_glue->destroy_wq(rxq->wq)); + if (rxq->cq) + claim_zero(mlx5_glue->destroy_cq(rxq->cq)); + rte_free(rxq); + priv->drop_queue.rxq = NULL; +} + +/** + * Create a drop indirection table. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl; + struct mlx5_rxq_ibv *rxq; + struct mlx5_ind_table_ibv tmpl; + + rxq = mlx5_rxq_ibv_drop_new(dev); + if (!rxq) + return NULL; + tmpl.ind_table = mlx5_glue->create_rwq_ind_table + (priv->ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = 0, + .ind_tbl = &rxq->wq, + .comp_mask = 0, + }); + if (!tmpl.ind_table) { + DEBUG("port %u cannot allocate indirection table for drop" + " queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0); + if (!ind_tbl) { + rte_errno = ENOMEM; + goto error; + } + ind_tbl->ind_table = tmpl.ind_table; + return ind_tbl; +error: + mlx5_rxq_ibv_drop_release(dev); + return NULL; +} + +/** + * Release a drop indirection table. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table; + + claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table)); + mlx5_rxq_ibv_drop_release(dev); + rte_free(ind_tbl); + priv->drop_queue.hrxq->ind_table = NULL; +} + +/** + * Create a drop Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
+ */ +struct mlx5_hrxq * +mlx5_hrxq_drop_new(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl; + struct ibv_qp *qp; + struct mlx5_hrxq *hrxq; + + if (priv->drop_queue.hrxq) { + rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt); + return priv->drop_queue.hrxq; + } + ind_tbl = mlx5_ind_table_ibv_drop_new(dev); + if (!ind_tbl) + return NULL; + qp = mlx5_glue->create_qp_ex(priv->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = + IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN, + .rx_hash_key = rss_hash_default_key, + .rx_hash_fields_mask = 0, + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->pd + }); + if (!qp) { + DEBUG("port %u cannot allocate QP for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0); + if (!hrxq) { + DRV_LOG(WARNING, + "port %u cannot allocate memory for drop queue", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + hrxq->ind_table = ind_tbl; + hrxq->qp = qp; + priv->drop_queue.hrxq = hrxq; + rte_atomic32_set(&hrxq->refcnt, 1); + return hrxq; +error: + if (ind_tbl) + mlx5_ind_table_ibv_drop_release(dev); + return NULL; +} + +/** + * Release a drop hash Rx queue. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_hrxq_drop_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; + + if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { + claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); + mlx5_ind_table_ibv_drop_release(dev); + rte_free(hrxq); + priv->drop_queue.hrxq = NULL; + } +} diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 52785946..2d14f8a6 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -38,7 +38,7 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); static __rte_always_inline int mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, - uint16_t cqe_cnt, uint32_t *rss_hash); + uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe); static __rte_always_inline uint32_t rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); @@ -495,6 +495,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) volatile struct mlx5_wqe_ctrl *last_wqe = NULL; unsigned int segs_n = 0; const unsigned int max_inline = txq->max_inline; + uint64_t addr_64; if (unlikely(!pkts_n)) return 0; @@ -503,8 +504,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; @@ -711,12 +710,12 @@ pkt_inline: ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - addr = rte_cpu_to_be_64(addr); + addr_64 = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; ++ds; if (!segs_n) @@ -750,12 +749,12 @@ next_seg: total_length += length; #endif /* Store segment information. 
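A short sketch (not from this patch) of the assumed lifecycle of the drop queue objects introduced above: mlx5_hrxq_drop_new() lazily builds the CQ, WQ, indirection table and QP and is reference counted, so a flow needing a DROP target only deals with the top-level hash Rx queue:

/* Illustrative sketch only; hypothetical helper name. */
static int
attach_drop_target(struct rte_eth_dev *dev)
{
	struct mlx5_hrxq *hrxq = mlx5_hrxq_drop_new(dev);

	if (!hrxq)
		return -rte_errno; /* rte_errno set by the failing step */
	/* ... use hrxq->qp as the destination of the ibv flow ... */
	mlx5_hrxq_drop_release(dev); /* tears everything down on last release */
	return 0;
}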
*/ - addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); + addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; (*txq->elts)[++elts_head & elts_m] = buf; if (--segs_n) @@ -816,14 +815,13 @@ next_wqe: /* Check whether completion threshold has been reached. */ comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -942,8 +940,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; @@ -1032,14 +1028,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (comp >= MLX5_TX_COMP_THRESH) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -1171,8 +1166,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); do { struct rte_mbuf *buf = *(pkts++); uintptr_t addr; @@ -1329,14 +1322,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, if (comp >= MLX5_TX_COMP_THRESH) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp = comp; } @@ -1450,6 +1442,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, unsigned int mpw_room = 0; unsigned int inl_pad = 0; uint32_t inl_hdr; + uint64_t addr_64; struct mlx5_mpw mpw = { .state = MLX5_MPW_STATE_CLOSED, }; @@ -1459,8 +1452,6 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, /* Start processing. */ mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. 
*/ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); if (unlikely(!max_wqe)) return 0; @@ -1586,13 +1577,13 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, ((uintptr_t)mpw.data.raw + inl_pad); (*txq->elts)[elts_head++ & elts_m] = buf; - addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, - uintptr_t)); + addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, + uintptr_t)); *dseg = (rte_v128u32_t) { rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - addr, - addr >> 32, + addr_64, + addr_64 >> 32, }; mpw.data.raw = (volatile void *)(dseg + 1); mpw.total_len += (inl_pad + sizeof(*dseg)); @@ -1616,15 +1607,14 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) { volatile struct mlx5_wqe *wqe = mpw.wqe; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request completion on last WQE. */ wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. */ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; txq->mpw_comp = txq->wqe_ci; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } else { txq->elts_comp += j; } @@ -1722,8 +1712,9 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) * Pointer to RX queue. * @param cqe * CQE to process. - * @param[out] rss_hash - * Packet RSS Hash result. + * @param[out] mcqe + * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not + * written. * * @return * Packet size in bytes (0 if there is none), -1 in case of completion @@ -1731,7 +1722,7 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) */ static inline int mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, - uint16_t cqe_cnt, uint32_t *rss_hash) + uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe) { struct rxq_zip *zip = &rxq->zip; uint16_t cqe_n = cqe_cnt + 1; @@ -1745,7 +1736,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info); len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); - *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result); + *mcqe = &(*mc)[zip->ai & 7]; if ((++zip->ai & 7) == 0) { /* Invalidate consumed CQEs */ idx = zip->ca; @@ -1810,7 +1801,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; /* Get packet size to return. */ len = rte_be_to_cpu_32((*mc)[0].byte_cnt); - *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result); + *mcqe = &(*mc)[0]; zip->ai = 1; /* Prefetch all the entries to be invalidated */ idx = zip->ca; @@ -1821,7 +1812,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, } } else { len = rte_be_to_cpu_32(cqe->byte_cnt); - *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res); } /* Error while receiving packet. 
*/ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) @@ -1934,7 +1924,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) volatile struct mlx5_wqe_data_seg *wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; struct rte_mbuf *rep = (*rxq->elts)[idx]; - uint32_t rss_hash_res = 0; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; + uint32_t rss_hash_res; if (pkt) NEXT(seg) = rep; @@ -1964,8 +1955,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } if (!pkt) { cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; - len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, - &rss_hash_res); + len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); if (!len) { rte_mbuf_raw_free(rep); break; @@ -1979,6 +1969,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) pkt = seg; assert(len >= (rxq->crc_present << 2)); pkt->ol_flags = 0; + /* If compressed, take hash result from mini-CQE. */ + rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ? + cqe->rx_hash_res : + mcqe->rx_hash_result); rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); if (rxq->crc_present) len -= ETHER_CRC_LEN; @@ -2104,7 +2098,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; unsigned int i = 0; uint16_t rq_ci = rxq->rq_ci; - uint16_t strd_idx = rxq->strd_ci; + uint16_t consumed_strd = rxq->consumed_strd; struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; while (i < pkts_n) { @@ -2112,12 +2106,14 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) void *addr; int ret; unsigned int len; - uint16_t consumed_strd; + uint16_t strd_cnt; + uint16_t strd_idx; uint32_t offset; uint32_t byte_cnt; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; uint32_t rss_hash_res = 0; - if (strd_idx == strd_n) { + if (consumed_strd == strd_n) { /* Replace WQE only if the buffer is still in use. */ if (rte_atomic16_read(&buf->refcnt) > 1) { mprq_buf_replace(rxq, rq_ci & wq_mask); @@ -2137,12 +2133,12 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) rxq->mprq_repl = rep; } /* Advance to the next WQE. */ - strd_idx = 0; + consumed_strd = 0; ++rq_ci; buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; } cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; - ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &rss_hash_res); + ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); if (!ret) break; if (unlikely(ret == -1)) { @@ -2151,14 +2147,21 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) continue; } byte_cnt = ret; - consumed_strd = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> - MLX5_MPRQ_STRIDE_NUM_SHIFT; - assert(consumed_strd); - /* Calculate offset before adding up stride index. */ - offset = strd_idx * strd_sz + strd_shift; - strd_idx += consumed_strd; + strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> + MLX5_MPRQ_STRIDE_NUM_SHIFT; + assert(strd_cnt); + consumed_strd += strd_cnt; if (byte_cnt & MLX5_MPRQ_FILLER_MASK) continue; + if (mcqe == NULL) { + rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res); + strd_idx = rte_be_to_cpu_16(cqe->wqe_counter); + } else { + /* mini-CQE for MPRQ doesn't have hash result. */ + strd_idx = rte_be_to_cpu_16(mcqe->stride_idx); + } + assert(strd_idx < strd_n); + assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask)); /* * Currently configured to receive a packet per a stride. 
But if * MTU is adjusted through kernel interface, device could @@ -2166,7 +2169,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * case, the packet should be dropped because it is bigger than * the max_rx_pkt_len. */ - if (unlikely(consumed_strd > 1)) { + if (unlikely(strd_cnt > 1)) { ++rxq->stats.idropped; continue; } @@ -2179,6 +2182,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) assert((int)len >= (rxq->crc_present << 2)); if (rxq->crc_present) len -= ETHER_CRC_LEN; + offset = strd_idx * strd_sz + strd_shift; addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset); /* Initialize the offload flag. */ pkt->ol_flags = 0; @@ -2201,7 +2205,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } else { rte_iova_t buf_iova; struct rte_mbuf_ext_shared_info *shinfo; - uint16_t buf_len = consumed_strd * strd_sz; + uint16_t buf_len = strd_cnt * strd_sz; /* Increment the refcnt of the whole chunk. */ rte_atomic16_add_return(&buf->refcnt, 1); @@ -2250,7 +2254,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) ++i; } /* Update the consumer indexes. */ - rxq->strd_ci = strd_idx; + rxq->consumed_strd = consumed_strd; rte_cio_wmb(); *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); if (rq_ci != rxq->rq_ci) { diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index f53bb43c..48ed2b20 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -26,6 +26,8 @@ #include <rte_common.h> #include <rte_hexdump.h> #include <rte_atomic.h> +#include <rte_spinlock.h> +#include <rte_io.h> #include "mlx5_utils.h" #include "mlx5.h" @@ -34,6 +36,9 @@ #include "mlx5_defs.h" #include "mlx5_prm.h" +/* Support tunnel matching. */ +#define MLX5_FLOW_TUNNEL 5 + struct mlx5_rxq_stats { unsigned int idx; /**< Mapping index. */ #ifdef MLX5_PMD_SOFT_COUNTERS @@ -93,7 +98,7 @@ struct mlx5_rxq_data { volatile uint32_t *cq_db; uint16_t port_id; uint16_t rq_ci; - uint16_t strd_ci; /* Stride index in a WQE for Multi-Packet RQ. */ + uint16_t consumed_strd; /* Number of consumed strides in WQE. */ uint16_t rq_pi; uint16_t cq_ci; struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */ @@ -115,6 +120,10 @@ struct mlx5_rxq_data { void *cq_uar; /* CQ user access region. */ uint32_t cqn; /* CQ number. */ uint8_t cq_arm_sn; /* CQ arm seq number. */ +#ifndef RTE_ARCH_64 + rte_spinlock_t *uar_lock_cq; + /* CQ (UAR) access lock required for 32bit implementations */ +#endif uint32_t tunnel; /* Tunnel information. */ } __rte_cache_aligned; @@ -136,9 +145,10 @@ struct mlx5_rxq_ctrl { struct priv *priv; /* Back pointer to private data. */ struct mlx5_rxq_data rxq; /* Data path structure. */ unsigned int socket; /* CPU socket ID for allocations. */ - uint32_t tunnel_types[16]; /* Tunnel type counter. */ unsigned int irq:1; /* Whether IRQ is enabled. */ uint16_t idx; /* Queue index. */ + uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */ + uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */ }; /* Indirection table. */ @@ -157,8 +167,6 @@ struct mlx5_hrxq { struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */ struct ibv_qp *qp; /* Verbs queue pair. */ uint64_t hash_fields; /* Verbs Hash fields. */ - uint32_t tunnel; /* Tunnel type. */ - uint32_t rss_level; /* RSS on tunnel level. */ uint32_t rss_key_len; /* Hash key length in bytes. */ uint8_t rss_key[]; /* Hash key. 
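The MPRQ rework above reads the stride index of each packet from the CQE, or from the mini-CQE when compression is enabled, instead of tracking it locally; a condensed sketch (not from this patch) of that selection and the resulting buffer offset:

/* Illustrative sketch only, mirroring the logic in mlx5_rx_burst_mprq(). */
static inline uint32_t
mprq_pkt_offset(volatile struct mlx5_cqe *cqe,
		volatile struct mlx5_mini_cqe8 *mcqe,
		uint32_t strd_sz, uint32_t strd_shift)
{
	/* Mini-CQEs for MPRQ carry the stride index but no hash result. */
	uint16_t strd_idx = mcqe == NULL ?
			    rte_be_to_cpu_16(cqe->wqe_counter) :
			    rte_be_to_cpu_16(mcqe->stride_idx);

	return (uint32_t)strd_idx * strd_sz + strd_shift;
}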
*/ }; @@ -196,6 +204,10 @@ struct mlx5_txq_data { volatile void *bf_reg; /* Blueflame register remapped. */ struct rte_mbuf *(*elts)[]; /* TX elements. */ struct mlx5_txq_stats stats; /* TX queue counters. */ +#ifndef RTE_ARCH_64 + rte_spinlock_t *uar_lock; + /* UAR access lock required for 32bit implementations */ +#endif } __rte_cache_aligned; /* Verbs Rx queue elements. */ @@ -225,7 +237,6 @@ struct mlx5_txq_ctrl { /* mlx5_rxq.c */ extern uint8_t rss_hash_default_key[]; -extern const size_t rss_hash_default_key_len; int mlx5_check_mprq_support(struct rte_eth_dev *dev); int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq); @@ -245,6 +256,8 @@ struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv); int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev); +void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev); int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev); struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, @@ -265,18 +278,21 @@ struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, struct mlx5_ind_table_ibv *ind_tbl); int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev); +void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev); struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, const uint16_t *queues, uint32_t queues_n, - uint32_t tunnel, uint32_t rss_level); + int tunnel __rte_unused); struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n, - uint32_t tunnel, uint32_t rss_level); + const uint16_t *queues, uint32_t queues_n); int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq); int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev); +void mlx5_hrxq_drop_release(struct rte_eth_dev *dev); uint64_t mlx5_get_rx_port_offloads(void); uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev); @@ -348,6 +364,63 @@ void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl); uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr); uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr); +/** + * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and + * 64bit architectures. + * + * @param val + * value to write in CPU endian format. + * @param addr + * Address to write to. + * @param lock + * Address of the lock to use for that UAR access. + */ +static __rte_always_inline void +__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr, + rte_spinlock_t *lock __rte_unused) +{ +#ifdef RTE_ARCH_64 + rte_write64_relaxed(val, addr); +#else /* !RTE_ARCH_64 */ + rte_spinlock_lock(lock); + rte_write32_relaxed(val, addr); + rte_io_wmb(); + rte_write32_relaxed(val >> 32, + (volatile void *)((volatile char *)addr + 4)); + rte_spinlock_unlock(lock); +#endif +} + +/** + * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and + * 64bit architectures while guaranteeing the order of execution with the + * code being executed. 
+ * + * @param val + * value to write in CPU endian format. + * @param addr + * Address to write to. + * @param lock + * Address of the lock to use for that UAR access. + */ +static __rte_always_inline void +__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock) +{ + rte_io_wmb(); + __mlx5_uar_write64_relaxed(val, addr, lock); +} + +/* Assist macros, used instead of directly calling the functions they wrap. */ +#ifdef RTE_ARCH_64 +#define mlx5_uar_write64_relaxed(val, dst, lock) \ + __mlx5_uar_write64_relaxed(val, dst, NULL) +#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL) +#else +#define mlx5_uar_write64_relaxed(val, dst, lock) \ + __mlx5_uar_write64_relaxed(val, dst, lock) +#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock) +#endif + #ifndef NDEBUG /** * Verify or set magic value in CQE. @@ -362,7 +435,7 @@ static inline int check_cqe_seen(volatile struct mlx5_cqe *cqe) { static const uint8_t magic[] = "seen"; - volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0; + volatile uint8_t (*buf)[sizeof(cqe->rsvd1)] = &cqe->rsvd1; int ret = 1; unsigned int i; @@ -614,7 +687,7 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe, *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); /* Ensure ordering between DB record and BF copy. */ rte_wmb(); - *dst = *src; + mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock); if (cond) rte_wmb(); } diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h index 598dc751..fb884f92 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec.h @@ -91,9 +91,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n) &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx]; unsigned int i; - assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH); + assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n)); assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi))); - assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP); + assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP); /* Not to cross queue end. */ n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx); if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) { diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h index 71a5eaf2..b37b7381 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, assert(elts_n > pkts_n); mlx5_tx_complete(txq); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); if (unlikely(!pkts_n)) return 0; for (n = 0; n < pkts_n; ++n) { @@ -176,12 +174,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } #ifdef MLX5_PMD_SOFT_COUNTERS txq->stats.opackets += n; @@ -245,8 +242,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, assert(elts_n > pkts_n); mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. 
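The helpers above make UAR doorbell writes safe on 32-bit builds by splitting the 64-bit store into two ordered 32-bit stores under a per-UAR-page spinlock, while 64-bit builds keep the single relaxed store (the wrapper macro simply discards the lock argument there); a condensed caller sketch (not from this patch), following the mlx5_tx_dbrec_cond_wmb() change:

/* Illustrative sketch only. */
static inline void
ring_bf_doorbell(struct mlx5_txq_data *txq, volatile uint64_t *src)
{
	/* Ensure the doorbell record is visible before the BF copy. */
	rte_wmb();
	mlx5_uar_write64_relaxed(*src, txq->bf_reg, txq->uar_lock);
}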
*/ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts); if (unlikely(!pkts_n)) @@ -282,11 +277,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) { txq->elts_comp += pkts_n; } else { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request a completion. */ txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif comp_req = 8; } /* Fill CTRL in the header. */ @@ -739,7 +733,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished). */ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); - if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH) + if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n)) mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); /* See if there're unreturned mbufs from compressed CQE. */ rcvd_pkt = rxq->cq_ci - rxq->rq_pi; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h index 3e985d61..54b3783c 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h @@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, assert(elts_n > pkts_n); mlx5_tx_complete(txq); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); if (unlikely(!pkts_n)) return 0; for (n = 0; n < pkts_n; ++n) { @@ -177,12 +175,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif } #ifdef MLX5_PMD_SOFT_COUNTERS txq->stats.opackets += n; @@ -244,8 +241,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, assert(elts_n > pkts_n); mlx5_tx_complete(txq); max_elts = (elts_n - (elts_head - txq->elts_tail)); - /* A CQE slot must always be available. */ - assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts); assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr); @@ -283,11 +278,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) { txq->elts_comp += pkts_n; } else { + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci)); /* Request a completion. */ txq->elts_comp = 0; -#ifndef NDEBUG - ++txq->cq_pi; -#endif comp_req = 8; } /* Fill CTRL in the header. */ @@ -724,7 +718,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished). */ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); - if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH) + if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n)) mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); /* See if there're unreturned mbufs from compressed CQE. 
*/ rcvd_pkt = rxq->cq_ci - rxq->rq_pi; diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 99297d5c..a3a52291 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -36,6 +36,12 @@ mlx5_socket_init(struct rte_eth_dev *dev) int flags; /* + * Close the last socket that was used to communicate + * with the secondary process + */ + if (priv->primary_socket) + mlx5_socket_uninit(dev); + /* * Initialise the socket to communicate with the secondary * process. */ diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 875dd102..91f3d474 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -146,7 +146,7 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) et_stats->cmd = ETHTOOL_GSTATS; et_stats->n_stats = xstats_ctrl->stats_n; ifr.ifr_data = (caddr_t)et_stats; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); if (ret) { DRV_LOG(WARNING, "port %u unable to read statistic values from device", @@ -194,7 +194,7 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); if (ret) { DRV_LOG(WARNING, "port %u unable to query number of statistics", dev->data->port_id); @@ -244,7 +244,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev) strings->string_set = ETH_SS_STATS; strings->len = dev_stats_n; ifr.ifr_data = (caddr_t)strings; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); if (ret) { DRV_LOG(WARNING, "port %u unable to get statistic names", dev->data->port_id); diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index 3e7c0a90..e2a9bb70 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -46,7 +46,6 @@ mlx5_txq_start(struct rte_eth_dev *dev) unsigned int i; int ret; - /* Add memory regions to Tx queues. */ for (i = 0; i != priv->txqs_n; ++i) { struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); @@ -60,12 +59,17 @@ mlx5_txq_start(struct rte_eth_dev *dev) } } ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd); - if (ret) + if (ret) { + /* Adjust index for rollback. */ + i = priv->txqs_n - 1; goto error; + } return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_txq_stop(dev); + do { + mlx5_txq_release(dev, i); + } while (i-- != 0); rte_errno = ret; /* Restore rte_errno. */ return -rte_errno; } @@ -103,8 +107,10 @@ mlx5_rxq_start(struct rte_eth_dev *dev) int ret = 0; /* Allocate/reuse/resize mempool for Multi-Packet RQ. */ - if (mlx5_mprq_alloc_mp(dev)) - goto error; + if (mlx5_mprq_alloc_mp(dev)) { + /* Should not release Rx queues but return immediately. */ + return -rte_errno; + } for (i = 0; i != priv->rxqs_n; ++i) { struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i); struct rte_mempool *mp; @@ -130,7 +136,9 @@ mlx5_rxq_start(struct rte_eth_dev *dev) return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_rxq_stop(dev); + do { + mlx5_rxq_release(dev, i); + } while (i-- != 0); rte_errno = ret; /* Restore rte_errno. 
*/ return -rte_errno; } @@ -152,23 +160,21 @@ mlx5_dev_start(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; int ret; - dev->data->dev_started = 1; - DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues", - dev->data->port_id); + DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id); ret = mlx5_txq_start(dev); if (ret) { DRV_LOG(ERR, "port %u Tx queue allocation failed: %s", dev->data->port_id, strerror(rte_errno)); - goto error; + return -rte_errno; } ret = mlx5_rxq_start(dev); if (ret) { DRV_LOG(ERR, "port %u Rx queue allocation failed: %s", dev->data->port_id, strerror(rte_errno)); - goto error; + mlx5_txq_stop(dev); + return -rte_errno; } - if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG) - mlx5_mr_dump_dev(dev); + dev->data->dev_started = 1; ret = mlx5_rx_intr_vec_enable(dev); if (ret) { DRV_LOG(ERR, "port %u Rx interrupt vector creation failed", @@ -223,8 +229,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev) dev->tx_pkt_burst = removed_tx_burst; rte_wmb(); usleep(1000 * priv->rxqs_n); - DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues", - dev->data->port_id); + DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id); mlx5_flow_stop(dev, &priv->flows); mlx5_traffic_disable(dev); mlx5_rx_intr_vec_disable(dev); @@ -302,9 +307,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(vlan), }; - struct rte_flow_item_vlan vlan_mask = { - .tci = 0xffff, - }; + struct rte_flow_item_vlan vlan_mask = + rte_flow_item_vlan_mask; ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast, &vlan_spec, &vlan_mask); @@ -341,9 +345,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(vlan), }; - struct rte_flow_item_vlan vlan_mask = { - .tci = 0xffff, - }; + struct rte_flow_item_vlan vlan_mask = + rte_flow_item_vlan_mask; ret = mlx5_ctrl_flow_vlan(dev, &unicast, &unicast_mask, diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 691ea071..f9bc4739 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -113,15 +113,20 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev) DEV_TX_OFFLOAD_TCP_CKSUM); if (config->tso) offloads |= DEV_TX_OFFLOAD_TCP_TSO; + if (config->swp) { + if (config->hw_csum) + offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (config->tso) + offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); + } + if (config->tunnel_en) { if (config->hw_csum) offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; if (config->tso) offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO); - if (config->swp) - offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO | - DEV_TX_OFFLOAD_UDP_TNL_TSO); } return offloads; } @@ -250,6 +255,9 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) struct mlx5_txq_ctrl *txq_ctrl; int already_mapped; size_t page_size = sysconf(_SC_PAGESIZE); +#ifndef RTE_ARCH_64 + unsigned int lock_idx; +#endif memset(pages, 0, priv->txqs_n * sizeof(uintptr_t)); /* @@ -276,7 +284,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) } /* new address in reserved UAR address space. 
 		addr = RTE_PTR_ADD(priv->uar_base,
-				   uar_va & (MLX5_UAR_SIZE - 1));
+				   uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
 		if (!already_mapped) {
 			pages[pages_n++] = uar_va;
 			/* fixed mmap to specified address in reserved
@@ -300,6 +308,12 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 		else
 			assert(txq_ctrl->txq.bf_reg ==
 			       RTE_PTR_ADD((void *)addr, off));
+#ifndef RTE_ARCH_64
+		/* Assign a UAR lock according to UAR page number */
+		lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
+			   MLX5_UAR_PAGE_NUM_MASK;
+		txq->uar_lock = &priv->uar_lock[lock_idx];
+#endif
 	}
 	return 0;
 }
@@ -429,7 +443,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		/* Move the QP to this state. */
 		.qp_state = IBV_QPS_INIT,
 		/* Primary port number. */
-		.port_num = priv->port
+		.port_num = 1,
 	};
 	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
 				   (IBV_QP_STATE | IBV_QP_PORT));
@@ -506,6 +520,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	rte_atomic32_inc(&txq_ibv->refcnt);
 	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
 		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+		DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
+			dev->data->port_id, txq_ctrl->uar_mmap_offset);
 	} else {
 		DRV_LOG(ERR,
 			"port %u failed to retrieve UAR info, invalid"
@@ -514,8 +530,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		rte_errno = EINVAL;
 		goto error;
 	}
-	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
 	txq_ibv->txq_ctrl = txq_ctrl;
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -553,12 +567,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 	if (!(*priv->txqs)[idx])
 		return NULL;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (txq_ctrl->ibv) {
+	if (txq_ctrl->ibv)
 		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
-		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-			dev->data->port_id, txq_ctrl->idx,
-			rte_atomic32_read(&txq_ctrl->ibv->refcnt));
-	}
 	return txq_ctrl->ibv;
 }

@@ -575,9 +585,6 @@ int
 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
 	assert(txq_ibv);
-	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		PORT_ID(txq_ibv->txq_ctrl->priv),
-		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
 		claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -716,7 +723,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 					     max_tso_inline);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	txq_ctrl->txq.tunnel_en = config->tunnel_en;
+	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
 	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
 				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
 				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
@@ -778,8 +785,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
 	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
-	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-		idx, rte_atomic32_read(&tmpl->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 error:
@@ -809,9 +814,6 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 				    txq);
 		mlx5_txq_ibv_get(dev, idx);
 		rte_atomic32_inc(&ctrl->refcnt);
-		DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
-			dev->data->port_id,
-			ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
 	}
 	return ctrl;
 }
@@ -837,8 +839,6 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-		txq->idx, rte_atomic32_read(&txq->refcnt));
 	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
 		txq->ibv = NULL;
 	if (priv->uar_base)