author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 14:51:32 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 16:20:45 +0200
commit     7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree       4bfeadc905c977e45e54a90c42330553b8942e4e /drivers/net/mlx5
parent     ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)
Imported Upstream version 17.05
Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/mlx5')
-rw-r--r--  drivers/net/mlx5/Makefile       |   15
-rw-r--r--  drivers/net/mlx5/mlx5.c         |  223
-rw-r--r--  drivers/net/mlx5/mlx5.h         |   57
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h    |   14
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c  |  126
-rw-r--r--  drivers/net/mlx5/mlx5_fdir.c    |   15
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c    | 1586
-rw-r--r--  drivers/net/mlx5/mlx5_mac.c     |   16
-rw-r--r--  drivers/net/mlx5/mlx5_prm.h     |  124
-rw-r--r--  drivers/net/mlx5/mlx5_rss.c     |   18
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c     |  239
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c    | 1097
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h    |   30
-rw-r--r--  drivers/net/mlx5/mlx5_stats.c   |  365
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c |   32
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c     |  162
16 files changed, 3633 insertions, 486 deletions
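The mlx5.c changes below rework device-parameter handling: values given as devargs are first parsed into a temporary struct mlx5_args whose fields start at MLX5_ARG_UNSET, and mlx5_args_assign() then copies only the explicitly provided values over the driver defaults, so defaults derived from device capabilities (MPS, CQE compression) survive unless the user overrides them. A minimal standalone sketch of that parse-then-assign pattern, with made-up structure names (not the PMD code itself), looks like this:

#define ARG_UNSET (-1)

struct args {              /* raw values parsed from devargs */
	int cqe_comp;
	int tso;
};

struct config {            /* driver configuration, pre-filled with defaults */
	int cqe_comp;
	int tso;
};

/* Copy only the parameters the user actually set, so capability-based
 * defaults are kept unless explicitly overridden. */
static void
args_assign(struct config *cfg, const struct args *args)
{
	if (args->cqe_comp != ARG_UNSET)
		cfg->cqe_comp = args->cqe_comp;
	if (args->tso != ARG_UNSET)
		cfg->tso = args->tso;
}

static void
example(void)
{
	struct config cfg = { .cqe_comp = 1, .tso = 0 };        /* defaults */
	struct args args = { .cqe_comp = ARG_UNSET, .tso = 1 }; /* user input */

	args_assign(&cfg, &args); /* cqe_comp stays 1, tso becomes 1 */
}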
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index cf87f0b1..c0799591 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -48,13 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c - -# Dependencies. -DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether -DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mbuf -DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_eal -DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mempool -DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_kvargs +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c # Basic CFLAGS. CFLAGS += -O3 @@ -104,7 +98,7 @@ endif mlx5_autoconf.h.new: FORCE -mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh +mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \ @@ -136,6 +130,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh /usr/include/linux/ethtool.h \ enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \ $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_UPDATE_CQ_CI \ + infiniband/mlx5_hw.h \ + func ibv_mlx5_exp_update_cq_ci \ + $(AUTOCONF_OUTPUT) # Create mlx5_autoconf.h or update it in case it differs from the new one. diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index cb45fd0f..fc99c0d5 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -56,6 +56,7 @@ #endif #include <rte_malloc.h> #include <rte_ethdev.h> +#include <rte_ethdev_pci.h> #include <rte_pci.h> #include <rte_common.h> #include <rte_kvargs.h> @@ -84,6 +85,27 @@ /* Device parameter to enable multi-packet send WQEs. */ #define MLX5_TXQ_MPW_EN "txq_mpw_en" +/* Device parameter to include 2 dsegs in the title WQEBB. */ +#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" + +/* Device parameter to limit the size of inlining packet. */ +#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" + +/* Device parameter to enable hardware TSO offload. */ +#define MLX5_TSO "tso" + +/* Default PMD specific parameter value. */ +#define MLX5_ARG_UNSET (-1) + +struct mlx5_args { + int cqe_comp; + int txq_inline; + int txqs_inline; + int mps; + int mpw_hdr_dseg; + int inline_max_packet_sz; + int tso; +}; /** * Retrieve integer value from environment variable. 
* @@ -199,6 +221,9 @@ static const struct eth_dev_ops mlx5_dev_ops = { .link_update = mlx5_link_update, .stats_get = mlx5_stats_get, .stats_reset = mlx5_stats_reset, + .xstats_get = mlx5_xstats_get, + .xstats_reset = mlx5_xstats_reset, + .xstats_get_names = mlx5_xstats_get_names, .dev_infos_get = mlx5_dev_infos_get, .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, .vlan_filter_set = mlx5_vlan_filter_set, @@ -219,6 +244,10 @@ static const struct eth_dev_ops mlx5_dev_ops = { .rss_hash_update = mlx5_rss_hash_update, .rss_hash_conf_get = mlx5_rss_hash_conf_get, .filter_ctrl = mlx5_dev_filter_ctrl, + .rx_descriptor_status = mlx5_rx_descriptor_status, + .tx_descriptor_status = mlx5_tx_descriptor_status, + .rx_queue_intr_enable = mlx5_rx_intr_enable, + .rx_queue_intr_disable = mlx5_rx_intr_disable, }; static struct { @@ -270,7 +299,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr) static int mlx5_args_check(const char *key, const char *val, void *opaque) { - struct priv *priv = opaque; + struct mlx5_args *args = opaque; unsigned long tmp; errno = 0; @@ -280,13 +309,19 @@ mlx5_args_check(const char *key, const char *val, void *opaque) return errno; } if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { - priv->cqe_comp = !!tmp; + args->cqe_comp = !!tmp; } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { - priv->txq_inline = tmp; + args->txq_inline = tmp; } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { - priv->txqs_inline = tmp; + args->txqs_inline = tmp; } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { - priv->mps = !!tmp; + args->mps = !!tmp; + } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { + args->mpw_hdr_dseg = !!tmp; + } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { + args->inline_max_packet_sz = tmp; + } else if (strcmp(MLX5_TSO, key) == 0) { + args->tso = !!tmp; } else { WARN("%s: unknown parameter", key); return -EINVAL; @@ -306,13 +341,16 @@ mlx5_args_check(const char *key, const char *val, void *opaque) * 0 on success, errno value on failure. */ static int -mlx5_args(struct priv *priv, struct rte_devargs *devargs) +mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs) { const char **params = (const char *[]){ MLX5_RXQ_CQE_COMP_EN, MLX5_TXQ_INLINE, MLX5_TXQS_MIN_INLINE, MLX5_TXQ_MPW_EN, + MLX5_TXQ_MPW_HDR_DSEG_EN, + MLX5_TXQ_MAX_INLINE_LEN, + MLX5_TSO, NULL, }; struct rte_kvargs *kvlist; @@ -329,7 +367,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs) for (i = 0; (params[i] != NULL); ++i) { if (rte_kvargs_count(kvlist, params[i])) { ret = rte_kvargs_process(kvlist, params[i], - mlx5_args_check, priv); + mlx5_args_check, args); if (ret != 0) { rte_kvargs_free(kvlist); return ret; @@ -340,7 +378,35 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs) return 0; } -static struct eth_driver mlx5_driver; +static struct rte_pci_driver mlx5_driver; + +/** + * Assign parameters from args into priv, only non default + * values are considered. + * + * @param[out] priv + * Pointer to private structure. + * @param[in] args + * Pointer to args values. + */ +static void +mlx5_args_assign(struct priv *priv, struct mlx5_args *args) +{ + if (args->cqe_comp != MLX5_ARG_UNSET) + priv->cqe_comp = args->cqe_comp; + if (args->txq_inline != MLX5_ARG_UNSET) + priv->txq_inline = args->txq_inline; + if (args->txqs_inline != MLX5_ARG_UNSET) + priv->txqs_inline = args->txqs_inline; + if (args->mps != MLX5_ARG_UNSET) + priv->mps = args->mps ? 
priv->mps : 0; + if (args->mpw_hdr_dseg != MLX5_ARG_UNSET) + priv->mpw_hdr_dseg = args->mpw_hdr_dseg; + if (args->inline_max_packet_sz != MLX5_ARG_UNSET) + priv->inline_max_packet_sz = args->inline_max_packet_sz; + if (args->tso != MLX5_ARG_UNSET) + priv->tso = args->tso; +} /** * DPDK callback to register a PCI device. @@ -366,11 +432,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct ibv_device_attr device_attr; unsigned int sriov; unsigned int mps; + unsigned int tunnel_en; int idx; int i; (void)pci_drv; - assert(pci_drv == &mlx5_driver.pci_drv); + assert(pci_drv == &mlx5_driver); /* Get mlx5_dev[] index. */ idx = mlx5_dev_idx(&pci_dev->addr); if (idx == -1) { @@ -384,10 +451,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) list = ibv_get_device_list(&i); if (list == NULL) { assert(errno); - if (errno == ENOSYS) { - WARN("cannot list devices, is ib_uverbs loaded?"); - return 0; - } + if (errno == ENOSYS) + ERROR("cannot list devices, is ib_uverbs loaded?"); return -errno; } assert(i >= 0); @@ -410,15 +475,39 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) sriov = ((pci_dev->id.device_id == PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)); - /* Multi-packet send is only supported by ConnectX-4 Lx PF. */ - mps = (pci_dev->id.device_id == - PCI_DEVICE_ID_MELLANOX_CONNECTX4LX); + PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) || + (pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) || + (pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)); + /* + * Multi-packet send is supported by ConnectX-4 Lx PF as well + * as all ConnectX-5 devices. + */ + switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4: + tunnel_en = 1; + mps = MLX5_MPW_DISABLED; + break; + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: + mps = MLX5_MPW; + break; + case PCI_DEVICE_ID_MELLANOX_CONNECTX5: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: + tunnel_en = 1; + mps = MLX5_MPW_ENHANCED; + break; + default: + mps = MLX5_MPW_DISABLED; + } INFO("PCI information matches, using device \"%s\"" - " (SR-IOV: %s, MPS: %s)", + " (SR-IOV: %s, %sMPS: %s)", list[i]->name, sriov ? "true" : "false", - mps ? "true" : "false"); + mps == MLX5_MPW_ENHANCED ? "Enhanced " : "", + mps != MLX5_MPW_DISABLED ? 
"true" : "false"); attr_ctx = ibv_open_device(list[i]); err = errno; break; @@ -427,11 +516,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) ibv_free_device_list(list); switch (err) { case 0: - WARN("cannot access device, is mlx5_ib loaded?"); - return 0; + ERROR("cannot access device, is mlx5_ib loaded?"); + return -ENODEV; case EINVAL: - WARN("cannot use device, are drivers up to date?"); - return 0; + ERROR("cannot use device, are drivers up to date?"); + return -EINVAL; } assert(err > 0); return -err; @@ -454,12 +543,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct ibv_exp_device_attr exp_device_attr; struct ether_addr mac; uint16_t num_vfs = 0; + struct mlx5_args args = { + .cqe_comp = MLX5_ARG_UNSET, + .txq_inline = MLX5_ARG_UNSET, + .txqs_inline = MLX5_ARG_UNSET, + .mps = MLX5_ARG_UNSET, + .mpw_hdr_dseg = MLX5_ARG_UNSET, + .inline_max_packet_sz = MLX5_ARG_UNSET, + .tso = MLX5_ARG_UNSET, + }; exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS | IBV_EXP_DEVICE_ATTR_RX_HASH | IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS | IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN | + IBV_EXP_DEVICE_ATTR_TSO_CAPS | 0; DEBUG("using port %u (%08" PRIx32 ")", port, test); @@ -513,12 +612,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) priv->mtu = ETHER_MTU; priv->mps = mps; /* Enable MPW by default if supported. */ priv->cqe_comp = 1; /* Enable compression by default. */ - err = mlx5_args(priv, pci_dev->device.devargs); + priv->tunnel_en = tunnel_en; + err = mlx5_args(&args, pci_dev->device.devargs); if (err) { ERROR("failed to process device arguments: %s", strerror(err)); goto port_error; } + mlx5_args_assign(priv, &args); if (ibv_exp_query_device(ctx, &exp_device_attr)) { ERROR("ibv_exp_query_device() failed"); goto port_error; @@ -540,8 +641,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size; /* Remove this check once DPDK supports larger/variable * indirection tables. */ - if (priv->ind_table_max_size > (unsigned int)RSS_INDIRECTION_TABLE_SIZE) - priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; + if (priv->ind_table_max_size > + (unsigned int)ETH_RSS_RETA_SIZE_512) + priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512; DEBUG("maximum RX indirection table size is %u", priv->ind_table_max_size); priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap & @@ -560,11 +662,36 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) priv_get_num_vfs(priv, &num_vfs); priv->sriov = (num_vfs || sriov); + priv->tso = ((priv->tso) && + (exp_device_attr.tso_caps.max_tso > 0) && + (exp_device_attr.tso_caps.supported_qpts & + (1 << IBV_QPT_RAW_ETH))); + if (priv->tso) + priv->max_tso_payload_sz = + exp_device_attr.tso_caps.max_tso; if (priv->mps && !mps) { ERROR("multi-packet send not supported on this device" " (" MLX5_TXQ_MPW_EN ")"); err = ENOTSUP; goto port_error; + } else if (priv->mps && priv->tso) { + WARN("multi-packet send not supported in conjunction " + "with TSO. MPS disabled"); + priv->mps = 0; + } + INFO("%sMPS is %s", + priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "", + priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); + /* Set default values for Enhanced MPW, a.k.a MPWv2. 
*/ + if (priv->mps == MLX5_MPW_ENHANCED) { + if (args.txqs_inline == MLX5_ARG_UNSET) + priv->txqs_inline = MLX5_EMPW_MIN_TXQS; + if (args.inline_max_packet_sz == MLX5_ARG_UNSET) + priv->inline_max_packet_sz = + MLX5_EMPW_MAX_INLINE_LEN; + if (args.txq_inline == MLX5_ARG_UNSET) + priv->txq_inline = MLX5_WQE_SIZE_MAX - + MLX5_WQE_SIZE; } /* Allocate and register default RSS hash keys. */ priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n, @@ -654,23 +781,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup; } else { eth_dev->data->dev_private = priv; - eth_dev->data->rx_mbuf_alloc_failed = 0; - eth_dev->data->mtu = ETHER_MTU; eth_dev->data->mac_addrs = priv->mac; } - eth_dev->pci_dev = pci_dev; + eth_dev->device = &pci_dev->device; rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->driver = &mlx5_driver; + eth_dev->device->driver = &mlx5_driver.driver; priv->dev = eth_dev; eth_dev->dev_ops = &mlx5_dev_ops; - TAILQ_INIT(ð_dev->link_intr_cbs); - /* Bring Ethernet device up. */ DEBUG("forcing Ethernet interface up"); priv_set_flags(priv, ~IFF_UP, IFF_UP); - mlx5_link_update_unlocked(priv->dev, 1); + mlx5_link_update(priv->dev, 1); continue; port_error: @@ -725,20 +848,33 @@ static const struct rte_pci_id mlx5_pci_id_map[] = { PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) }, { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5EX) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) + }, + { .vendor_id = 0 } }; -static struct eth_driver mlx5_driver = { - .pci_drv = { - .driver = { - .name = MLX5_DRIVER_NAME - }, - .id_table = mlx5_pci_id_map, - .probe = mlx5_pci_probe, - .drv_flags = RTE_PCI_DRV_INTR_LSC, +static struct rte_pci_driver mlx5_driver = { + .driver = { + .name = MLX5_DRIVER_NAME }, - .dev_private_size = sizeof(struct priv) + .id_table = mlx5_pci_id_map, + .probe = mlx5_pci_probe, + .drv_flags = RTE_PCI_DRV_INTR_LSC, }; /** @@ -756,8 +892,9 @@ rte_mlx5_pmd_init(void) */ setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); ibv_fork_init(); - rte_eal_pci_register(&mlx5_driver.pci_drv); + rte_pci_register(&mlx5_driver); } RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); +RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 79b7a600..67fd7428 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -59,6 +59,7 @@ #include <rte_spinlock.h> #include <rte_interrupts.h> #include <rte_errno.h> +#include <rte_flow.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-Wpedantic" #endif @@ -82,6 +83,18 @@ enum { PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014, PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015, PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016, + PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017, + PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018, + PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019, + PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a, +}; + +struct mlx5_xstats_ctrl { + /* Number of device stats. */ + uint16_t stats_n; + /* Index in the device counters table. */ + uint16_t dev_table_idx[MLX5_MAX_XSTATS]; + uint64_t base[MLX5_MAX_XSTATS]; }; struct priv { @@ -110,11 +123,17 @@ struct priv { unsigned int hw_fcs_strip:1; /* FCS stripping is supported. 
*/ unsigned int hw_padding:1; /* End alignment padding is supported. */ unsigned int sriov:1; /* This is a VF or PF with VF devices. */ - unsigned int mps:1; /* Whether multi-packet send is supported. */ + unsigned int mps:2; /* Multi-packet send mode (0: disabled). */ + unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */ unsigned int pending_alarm:1; /* An alarm is pending. */ + unsigned int tso:1; /* Whether TSO is supported. */ + unsigned int tunnel_en:1; + /* Whether Tx offloads for tunneled packets are supported. */ + unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */ unsigned int txq_inline; /* Maximum packet size for inlining. */ unsigned int txqs_inline; /* Queue number threshold for inlining. */ + unsigned int inline_max_packet_sz; /* Max packet size for inlining. */ /* RX/TX queues. */ unsigned int rxqs_n; /* RX queues array size. */ unsigned int txqs_n; /* TX queues array size. */ @@ -135,7 +154,10 @@ struct priv { unsigned int reta_idx_n; /* RETA index size. */ struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */ struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */ + struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */ + LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */ uint32_t link_speed_capa; /* Link speed capabilities. */ + struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ rte_spinlock_t lock; /* Lock for control functions. */ }; @@ -182,13 +204,14 @@ struct priv *mlx5_get_priv(struct rte_eth_dev *dev); int mlx5_is_secondary(void); int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); int priv_ifreq(const struct priv *, int req, struct ifreq *); +int priv_is_ib_cntr(const char *); +int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *); int priv_get_num_vfs(struct priv *, uint16_t *); int priv_get_mtu(struct priv *, uint16_t *); int priv_set_flags(struct priv *, unsigned int, unsigned int); int mlx5_dev_configure(struct rte_eth_dev *); void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *); const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev); -int mlx5_link_update_unlocked(struct rte_eth_dev *, int); int mlx5_link_update(struct rte_eth_dev *, int); int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t); int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); @@ -196,7 +219,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); int mlx5_ibv_device_to_pci_addr(const struct ibv_device *, struct rte_pci_addr *); void mlx5_dev_link_status_handler(void *); -void mlx5_dev_interrupt_handler(struct rte_intr_handle *, void *); +void mlx5_dev_interrupt_handler(void *); void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *); void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); int mlx5_set_link_down(struct rte_eth_dev *dev); @@ -215,8 +238,8 @@ int hash_rxq_mac_addrs_add(struct hash_rxq *); int priv_mac_addr_add(struct priv *, unsigned int, const uint8_t (*)[ETHER_ADDR_LEN]); int priv_mac_addrs_enable(struct priv *); -void mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t, - uint32_t); +int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t, + uint32_t); void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *); /* mlx5_rss.c */ @@ -244,8 +267,14 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *); /* mlx5_stats.c */ 
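/*
 * (Editor's sketch, not part of the patch.) The xstats prototypes added
 * in this hunk back the new .xstats_get/.xstats_get_names dev_ops wired
 * up in mlx5.c above. Applications reach them through the generic ethdev
 * calls; a minimal, hypothetical reader (array bound is illustrative,
 * port id type follows the 17.05 ethdev API) could be:
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

#define XSTATS_MAX 64 /* illustrative bound, not a DPDK constant */

static void
print_port_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name names[XSTATS_MAX];
	struct rte_eth_xstat vals[XSTATS_MAX];
	int n = rte_eth_xstats_get_names(port_id, names, XSTATS_MAX);
	int i;

	if (n <= 0 || n > XSTATS_MAX || rte_eth_xstats_get(port_id, vals, n) != n)
		return;
	for (i = 0; i != n; ++i)
		printf("%s: %" PRIu64 "\n", names[vals[i].id].name, vals[i].value);
}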
+void priv_xstats_init(struct priv *); void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *); void mlx5_stats_reset(struct rte_eth_dev *); +int mlx5_xstats_get(struct rte_eth_dev *, + struct rte_eth_xstat *, unsigned int); +void mlx5_xstats_reset(struct rte_eth_dev *); +int mlx5_xstats_get_names(struct rte_eth_dev *, + struct rte_eth_xstat_name *, unsigned int); /* mlx5_vlan.c */ @@ -268,4 +297,22 @@ void priv_fdir_enable(struct priv *); int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type, enum rte_filter_op, void *); +/* mlx5_flow.c */ + +int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *, + const struct rte_flow_item [], + const struct rte_flow_action [], + struct rte_flow_error *); +struct rte_flow *mlx5_flow_create(struct rte_eth_dev *, + const struct rte_flow_attr *, + const struct rte_flow_item [], + const struct rte_flow_action [], + struct rte_flow_error *); +int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *, + struct rte_flow_error *); +int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *); +int priv_flow_start(struct priv *); +void priv_flow_stop(struct priv *); +int priv_flow_rxq_in_use(struct priv *, struct rxq *); + #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h index b32816e6..201bb336 100644 --- a/drivers/net/mlx5/mlx5_defs.h +++ b/drivers/net/mlx5/mlx5_defs.h @@ -54,8 +54,12 @@ */ #define MLX5_TX_COMP_THRESH 32 -/* RSS Indirection table size. */ -#define RSS_INDIRECTION_TABLE_SIZE 256 +/* + * Request TX completion every time the total number of WQEBBs used for inlining + * packets exceeds the size of WQ divided by this divisor. Better to be power of + * two for performance. + */ +#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3) /* * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP @@ -79,4 +83,10 @@ /* Alarm timeout. */ #define MLX5_ALARM_TIMEOUT_US 100000 +/* Maximum number of extended statistics counters. */ +#define MLX5_MAX_XSTATS 32 + +/* Maximum Packet headers size (L2+L3+L4) for TSO. */ +#define MLX5_MAX_TSO_HEADER 128 + #endif /* RTE_PMD_MLX5_DEFS_H_ */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 06cfd016..3fd22cb8 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -234,6 +234,23 @@ try_dev_id: } /** + * Check if the counter is located on ib counters file. + * + * @param[in] cntr + * Counter name. + * + * @return + * 1 if counter is located on ib counters file , 0 otherwise. + */ +int +priv_is_ib_cntr(const char *cntr) +{ + if (!strcmp(cntr, "out_of_buffer")) + return 1; + return 0; +} + +/** * Read from sysfs entry. * * @param[in] priv @@ -260,10 +277,15 @@ priv_sysfs_read(const struct priv *priv, const char *entry, if (priv_get_ifname(priv, &ifname)) return -1; - MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, - ifname, entry); - - file = fopen(path, "rb"); + if (priv_is_ib_cntr(entry)) { + MKSTR(path, "%s/ports/1/hw_counters/%s", + priv->ctx->device->ibdev_path, entry); + file = fopen(path, "rb"); + } else { + MKSTR(path, "%s/device/net/%s/%s", + priv->ctx->device->ibdev_path, ifname, entry); + file = fopen(path, "rb"); + } if (file == NULL) return -1; ret = fread(buf, 1, size, file); @@ -469,6 +491,30 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu) } /** + * Read device counter from sysfs. + * + * @param priv + * Pointer to private structure. + * @param name + * Counter name. + * @param[out] cntr + * Counter output buffer. 
+ * + * @return + * 0 on success, -1 on failure and errno is set. + */ +int +priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr) +{ + unsigned long ulong_ctr; + + if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1) + return -1; + *cntr = ulong_ctr; + return 0; +} + +/** * Set device MTU. * * @param priv @@ -615,6 +661,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) unsigned int max; char ifname[IF_NAMESIZE]; + info->pci_dev = RTE_DEV_TO_PCI(dev->device); + priv_lock(priv); /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; @@ -645,14 +693,16 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM); + if (priv->tso) + info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + if (priv->tunnel_en) + info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO); if (priv_get_ifname(priv, &ifname) == 0) info->if_index = if_nametoindex(ifname); - /* FIXME: RETA update/query API expects the callee to know the size of - * the indirection table, for this PMD the size varies depending on - * the number of RX queues, it becomes impossible to find the correct - * size if it is not fixed. - * The API should be updated to solve this problem. */ - info->reta_size = priv->ind_table_max_size; + info->reta_size = priv->reta_idx_n ? + priv->reta_idx_n : priv->ind_table_max_size; info->hash_key_size = ((*priv->rss_conf) ? (*priv->rss_conf)[0]->rss_key_len : 0); @@ -665,10 +715,10 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) { static const uint32_t ptypes[] = { /* refers to rxq_cq_to_pkt_type() */ - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV6, - RTE_PTYPE_INNER_L3_IPV4, - RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, RTE_PTYPE_UNKNOWN }; @@ -679,7 +729,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) } /** - * Retrieve physical link information (unlocked version using legacy ioctl). + * DPDK callback to retrieve physical link information. * * @param dev * Pointer to Ethernet device structure. @@ -697,6 +747,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) struct rte_eth_link dev_link; int link_speed = 0; + /* priv_lock() is not taken to allow concurrent calls. */ + (void)wait_to_complete; if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); @@ -827,7 +879,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) } /** - * DPDK callback to retrieve physical link information (unlocked version). + * DPDK callback to retrieve physical link information. * * @param dev * Pointer to Ethernet device structure. @@ -835,7 +887,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) * Wait for request completion (ignored). */ int -mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct utsname utsname; int ver[3]; @@ -849,26 +901,6 @@ mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete) } /** - * DPDK callback to retrieve physical link information. - * - * @param dev - * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). 
- */ -int -mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) -{ - struct priv *priv = mlx5_get_priv(dev); - int ret; - - priv_lock(priv); - ret = mlx5_link_update_unlocked(dev, wait_to_complete); - priv_unlock(priv); - return ret; -} - -/** * DPDK callback to change the MTU. * * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be @@ -959,7 +991,6 @@ recover: struct rxq *rxq = (*priv->rxqs)[i]; struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); - int sp; unsigned int mb_len; unsigned int tmp; @@ -967,10 +998,9 @@ recover: continue; mb_len = rte_pktmbuf_data_room_size(rxq->mp); assert(mb_len >= RTE_PKTMBUF_HEADROOM); - /* Toggle scattered support (sp) if necessary. */ - sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM)); /* Provide new values to rxq_setup(). */ - dev->data->dev_conf.rxmode.jumbo_frame = sp; + dev->data->dev_conf.rxmode.jumbo_frame = + (max_frame_len > ETHER_MAX_LEN); dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len; if (rehash) ret = rxq_rehash(dev, rxq_ctrl); @@ -1244,13 +1274,12 @@ mlx5_dev_link_status_handler(void *arg) * Callback argument. */ void -mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg) +mlx5_dev_interrupt_handler(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; struct priv *priv = dev->data->dev_private; int ret; - (void)intr_handle; priv_lock(priv); ret = priv_dev_link_status_handler(priv, dev); priv_unlock(priv); @@ -1553,14 +1582,15 @@ void priv_select_tx_function(struct priv *priv) { priv->dev->tx_pkt_burst = mlx5_tx_burst; - /* Display warning for unsupported configurations. */ - if (priv->sriov && priv->mps) - WARN("multi-packet send WQE cannot be used on a SR-IOV setup"); /* Select appropriate TX function. */ - if ((priv->sriov == 0) && priv->mps && priv->txq_inline) { + if (priv->mps == MLX5_MPW_ENHANCED) { + priv->dev->tx_pkt_burst = + mlx5_tx_burst_empw; + DEBUG("selected Enhanced MPW TX function"); + } else if (priv->mps && priv->txq_inline) { priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline; DEBUG("selected MPW inline TX function"); - } else if ((priv->sriov == 0) && priv->mps) { + } else if (priv->mps) { priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw; DEBUG("selected MPW TX function"); } diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c index 1acf6826..f80c58b4 100644 --- a/drivers/net/mlx5/mlx5_fdir.c +++ b/drivers/net/mlx5/mlx5_fdir.c @@ -55,6 +55,8 @@ #include <rte_malloc.h> #include <rte_ethdev.h> #include <rte_common.h> +#include <rte_flow.h> +#include <rte_flow_driver.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-Wpedantic" #endif @@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) return ret; } +static const struct rte_flow_ops mlx5_flow_ops = { + .validate = mlx5_flow_validate, + .create = mlx5_flow_create, + .destroy = mlx5_flow_destroy, + .flush = mlx5_flow_flush, + .query = NULL, +}; + /** * Manage filter operations. 
* @@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, struct priv *priv = dev->data->dev_private; switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &mlx5_flow_ops; + return 0; case RTE_ETH_FILTER_FDIR: priv_lock(priv); ret = priv_fdir_ctrl_func(priv, filter_op, arg); diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c new file mode 100644 index 00000000..adcbe3f5 --- /dev/null +++ b/drivers/net/mlx5/mlx5_flow.c @@ -0,0 +1,1586 @@ +/*- + * BSD LICENSE + * + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <string.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include <rte_ethdev.h> +#include <rte_flow.h> +#include <rte_flow_driver.h> +#include <rte_malloc.h> + +#include "mlx5.h" +#include "mlx5_prm.h" + +/* Number of Work Queue necessary for the DROP queue. 
*/ +#define MLX5_DROP_WQ_N 4 + +static int +mlx5_flow_create_eth(const struct rte_flow_item *item, + const void *default_mask, + void *data); + +static int +mlx5_flow_create_vlan(const struct rte_flow_item *item, + const void *default_mask, + void *data); + +static int +mlx5_flow_create_ipv4(const struct rte_flow_item *item, + const void *default_mask, + void *data); + +static int +mlx5_flow_create_ipv6(const struct rte_flow_item *item, + const void *default_mask, + void *data); + +static int +mlx5_flow_create_udp(const struct rte_flow_item *item, + const void *default_mask, + void *data); + +static int +mlx5_flow_create_tcp(const struct rte_flow_item *item, + const void *default_mask, + void *data); + +static int +mlx5_flow_create_vxlan(const struct rte_flow_item *item, + const void *default_mask, + void *data); + +struct rte_flow { + LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ + struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */ + struct ibv_qp *qp; /**< Verbs queue pair. */ + struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */ + struct ibv_exp_wq *wq; /**< Verbs work queue. */ + struct ibv_cq *cq; /**< Verbs completion queue. */ + uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */ + uint32_t mark:1; /**< Set if the flow is marked. */ + uint32_t drop:1; /**< Drop queue. */ + uint64_t hash_fields; /**< Fields that participate in the hash. */ + struct rxq *rxqs[]; /**< Pointer to the queues array. */ +}; + +/** Static initializer for items. */ +#define ITEMS(...) \ + (const enum rte_flow_item_type []){ \ + __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \ + } + +/** Structure to generate a simple graph of layers supported by the NIC. */ +struct mlx5_flow_items { + /** List of possible actions for these items. */ + const enum rte_flow_action_type *const actions; + /** Bit-masks corresponding to the possibilities for the item. */ + const void *mask; + /** + * Default bit-masks to use when item->mask is not provided. When + * \default_mask is also NULL, the full supported bit-mask (\mask) is + * used instead. + */ + const void *default_mask; + /** Bit-masks size in bytes. */ + const unsigned int mask_sz; + /** + * Conversion function from rte_flow to NIC specific flow. + * + * @param item + * rte_flow item to convert. + * @param default_mask + * Default bit-masks to use when item->mask is not provided. + * @param data + * Internal structure to store the conversion. + * + * @return + * 0 on success, negative value otherwise. + */ + int (*convert)(const struct rte_flow_item *item, + const void *default_mask, + void *data); + /** Size in bytes of the destination structure. */ + const unsigned int dst_sz; + /** List of possible following items. */ + const enum rte_flow_item_type *const items; +}; + +/** Valid action for this PMD. */ +static const enum rte_flow_action_type valid_actions[] = { + RTE_FLOW_ACTION_TYPE_DROP, + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_MARK, + RTE_FLOW_ACTION_TYPE_FLAG, + RTE_FLOW_ACTION_TYPE_END, +}; + +/** Graph of supported items and associated actions. 
*/ +static const struct mlx5_flow_items mlx5_flow_items[] = { + [RTE_FLOW_ITEM_TYPE_END] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VXLAN), + }, + [RTE_FLOW_ITEM_TYPE_ETH] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_eth){ + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .type = -1, + }, + .default_mask = &rte_flow_item_eth_mask, + .mask_sz = sizeof(struct rte_flow_item_eth), + .convert = mlx5_flow_create_eth, + .dst_sz = sizeof(struct ibv_exp_flow_spec_eth), + }, + [RTE_FLOW_ITEM_TYPE_VLAN] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_vlan){ + .tci = -1, + }, + .default_mask = &rte_flow_item_vlan_mask, + .mask_sz = sizeof(struct rte_flow_item_vlan), + .convert = mlx5_flow_create_vlan, + .dst_sz = 0, + }, + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_TCP), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_ipv4){ + .hdr = { + .src_addr = -1, + .dst_addr = -1, + .type_of_service = -1, + .next_proto_id = -1, + }, + }, + .default_mask = &rte_flow_item_ipv4_mask, + .mask_sz = sizeof(struct rte_flow_item_ipv4), + .convert = mlx5_flow_create_ipv4, + .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext), + }, + [RTE_FLOW_ITEM_TYPE_IPV6] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_TCP), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_ipv6){ + .hdr = { + .src_addr = { + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, + .dst_addr = { + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + }, + .vtc_flow = -1, + .proto = -1, + .hop_limits = -1, + }, + }, + .default_mask = &rte_flow_item_ipv6_mask, + .mask_sz = sizeof(struct rte_flow_item_ipv6), + .convert = mlx5_flow_create_ipv6, + .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6_ext), + }, + [RTE_FLOW_ITEM_TYPE_UDP] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_udp){ + .hdr = { + .src_port = -1, + .dst_port = -1, + }, + }, + .default_mask = &rte_flow_item_udp_mask, + .mask_sz = sizeof(struct rte_flow_item_udp), + .convert = mlx5_flow_create_udp, + .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp), + }, + [RTE_FLOW_ITEM_TYPE_TCP] = { + .actions = valid_actions, + .mask = &(const struct rte_flow_item_tcp){ + .hdr = { + .src_port = -1, + .dst_port = -1, + }, + }, + .default_mask = &rte_flow_item_tcp_mask, + .mask_sz = sizeof(struct rte_flow_item_tcp), + .convert = mlx5_flow_create_tcp, + .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp), + }, + [RTE_FLOW_ITEM_TYPE_VXLAN] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_vxlan){ + .vni = "\xff\xff\xff", + }, + .default_mask = &rte_flow_item_vxlan_mask, + .mask_sz = sizeof(struct rte_flow_item_vxlan), + .convert = mlx5_flow_create_vxlan, + .dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel), + }, +}; + +/** Structure to pass to the conversion function. */ +struct mlx5_flow { + struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */ + unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */ + uint32_t inner; /**< Set once VXLAN is encountered. 
*/ + uint64_t hash_fields; /**< Fields that participate in the hash. */ +}; + +/** Structure for Drop queue. */ +struct rte_flow_drop { + struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */ + struct ibv_qp *qp; /**< Verbs queue pair. */ + struct ibv_exp_wq *wqs[MLX5_DROP_WQ_N]; /**< Verbs work queue. */ + struct ibv_cq *cq; /**< Verbs completion queue. */ +}; + +struct mlx5_flow_action { + uint32_t queue:1; /**< Target is a receive queue. */ + uint32_t drop:1; /**< Target is a drop queue. */ + uint32_t mark:1; /**< Mark is present in the flow. */ + uint32_t mark_id; /**< Mark identifier. */ + uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */ + uint16_t queues_n; /**< Number of entries in queue[]. */ +}; + +/** + * Check support for a given item. + * + * @param item[in] + * Item specification. + * @param mask[in] + * Bit-masks covering supported fields to compare with spec, last and mask in + * \item. + * @param size + * Bit-Mask size in bytes. + * + * @return + * 0 on success. + */ +static int +mlx5_flow_item_validate(const struct rte_flow_item *item, + const uint8_t *mask, unsigned int size) +{ + int ret = 0; + + if (!item->spec && (item->mask || item->last)) + return -1; + if (item->spec && !item->mask) { + unsigned int i; + const uint8_t *spec = item->spec; + + for (i = 0; i < size; ++i) + if ((spec[i] | mask[i]) != mask[i]) + return -1; + } + if (item->last && !item->mask) { + unsigned int i; + const uint8_t *spec = item->last; + + for (i = 0; i < size; ++i) + if ((spec[i] | mask[i]) != mask[i]) + return -1; + } + if (item->mask) { + unsigned int i; + const uint8_t *spec = item->mask; + + for (i = 0; i < size; ++i) + if ((spec[i] | mask[i]) != mask[i]) + return -1; + } + if (item->spec && item->last) { + uint8_t spec[size]; + uint8_t last[size]; + const uint8_t *apply = mask; + unsigned int i; + + if (item->mask) + apply = item->mask; + for (i = 0; i < size; ++i) { + spec[i] = ((const uint8_t *)item->spec)[i] & apply[i]; + last[i] = ((const uint8_t *)item->last)[i] & apply[i]; + } + ret = memcmp(spec, last, size); + } + return ret; +} + +/** + * Validate a flow supported by the NIC. + * + * @param priv + * Pointer to private structure. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] error + * Perform verbose error reporting if not NULL. + * @param[in, out] flow + * Flow structure to update. + * @param[in, out] action + * Action structure to update. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +priv_flow_validate(struct priv *priv, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct mlx5_flow *flow, + struct mlx5_flow_action *action) +{ + const struct mlx5_flow_items *cur_item = mlx5_flow_items; + + (void)priv; + if (attr->group) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "groups are not supported"); + return -rte_errno; + } + if (attr->priority) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priorities are not supported"); + return -rte_errno; + } + if (attr->egress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "egress is not supported"); + return -rte_errno; + } + if (!attr->ingress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "only ingress is supported"); + return -rte_errno; + } + for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { + const struct mlx5_flow_items *token = NULL; + unsigned int i; + int err; + + if (items->type == RTE_FLOW_ITEM_TYPE_VOID) + continue; + for (i = 0; + cur_item->items && + cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END; + ++i) { + if (cur_item->items[i] == items->type) { + token = &mlx5_flow_items[items->type]; + break; + } + } + if (!token) + goto exit_item_not_supported; + cur_item = token; + err = mlx5_flow_item_validate(items, + (const uint8_t *)cur_item->mask, + cur_item->mask_sz); + if (err) + goto exit_item_not_supported; + if (flow->ibv_attr && cur_item->convert) { + err = cur_item->convert(items, + (cur_item->default_mask ? + cur_item->default_mask : + cur_item->mask), + flow); + if (err) + goto exit_item_not_supported; + } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) { + if (flow->inner) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "cannot recognize multiple" + " VXLAN encapsulations"); + return -rte_errno; + } + flow->inner = 1; + } + flow->offset += cur_item->dst_sz; + } + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { + if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { + continue; + } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) { + action->drop = 1; + } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + const struct rte_flow_action_queue *queue = + (const struct rte_flow_action_queue *) + actions->conf; + uint16_t n; + uint16_t found = 0; + + if (!queue || (queue->index > (priv->rxqs_n - 1))) + goto exit_action_not_supported; + for (n = 0; n < action->queues_n; ++n) { + if (action->queues[n] == queue->index) { + found = 1; + break; + } + } + if (action->queues_n > 1 && !found) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "queue action not in RSS queues"); + return -rte_errno; + } + if (!found) { + action->queue = 1; + action->queues_n = 1; + action->queues[0] = queue->index; + } + } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) { + const struct rte_flow_action_rss *rss = + (const struct rte_flow_action_rss *) + actions->conf; + uint16_t n; + + if (!rss || !rss->num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "no valid queues"); + return -rte_errno; + } + if (action->queues_n == 1) { + uint16_t found = 0; + + assert(action->queues_n); + for (n = 0; n < rss->num; ++n) { + if (action->queues[0] == + rss->queue[n]) { + found = 1; + break; + } + } + if (!found) { + rte_flow_error_set(error, ENOTSUP, + 
RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "queue action not in RSS" + " queues"); + return -rte_errno; + } + } + for (n = 0; n < rss->num; ++n) { + if (rss->queue[n] >= priv->rxqs_n) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "queue id > number of" + " queues"); + return -rte_errno; + } + } + action->queue = 1; + for (n = 0; n < rss->num; ++n) + action->queues[n] = rss->queue[n]; + action->queues_n = rss->num; + } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) { + const struct rte_flow_action_mark *mark = + (const struct rte_flow_action_mark *) + actions->conf; + + if (!mark) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark must be defined"); + return -rte_errno; + } else if (mark->id >= MLX5_FLOW_MARK_MAX) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark must be between 0" + " and 16777199"); + return -rte_errno; + } + action->mark = 1; + action->mark_id = mark->id; + } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) { + action->mark = 1; + } else { + goto exit_action_not_supported; + } + } + if (action->mark && !flow->ibv_attr && !action->drop) + flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag); + if (!action->queue && !action->drop) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "no valid action"); + return -rte_errno; + } + return 0; +exit_item_not_supported: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + items, "item not supported"); + return -rte_errno; +exit_action_not_supported: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + actions, "action not supported"); + return -rte_errno; +} + +/** + * Validate a flow supported by the NIC. + * + * @see rte_flow_validate() + * @see rte_flow_ops + */ +int +mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + int ret; + struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) }; + struct mlx5_flow_action action = { + .queue = 0, + .drop = 0, + .mark = 0, + .mark_id = MLX5_FLOW_MARK_DEFAULT, + .queues_n = 0, + }; + + priv_lock(priv); + ret = priv_flow_validate(priv, attr, items, actions, error, &flow, + &action); + priv_unlock(priv); + return ret; +} + +/** + * Convert Ethernet item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. 
+ */ +static int +mlx5_flow_create_eth(const struct rte_flow_item *item, + const void *default_mask, + void *data) +{ + const struct rte_flow_item_eth *spec = item->spec; + const struct rte_flow_item_eth *mask = item->mask; + struct mlx5_flow *flow = (struct mlx5_flow *)data; + struct ibv_exp_flow_spec_eth *eth; + const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth); + unsigned int i; + + ++flow->ibv_attr->num_of_specs; + flow->ibv_attr->priority = 2; + flow->hash_fields = 0; + eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + *eth = (struct ibv_exp_flow_spec_eth) { + .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH, + .size = eth_size, + }; + if (!spec) + return 0; + if (!mask) + mask = default_mask; + memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN); + memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN); + eth->val.ether_type = spec->type; + memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN); + memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN); + eth->mask.ether_type = mask->type; + /* Remove unwanted bits from values. */ + for (i = 0; i < ETHER_ADDR_LEN; ++i) { + eth->val.dst_mac[i] &= eth->mask.dst_mac[i]; + eth->val.src_mac[i] &= eth->mask.src_mac[i]; + } + eth->val.ether_type &= eth->mask.ether_type; + return 0; +} + +/** + * Convert VLAN item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. + */ +static int +mlx5_flow_create_vlan(const struct rte_flow_item *item, + const void *default_mask, + void *data) +{ + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = item->mask; + struct mlx5_flow *flow = (struct mlx5_flow *)data; + struct ibv_exp_flow_spec_eth *eth; + const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth); + + eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size); + if (!spec) + return 0; + if (!mask) + mask = default_mask; + eth->val.vlan_tag = spec->tci; + eth->mask.vlan_tag = mask->tci; + eth->val.vlan_tag &= eth->mask.vlan_tag; + return 0; +} + +/** + * Convert IPv4 item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. 
+ */ +static int +mlx5_flow_create_ipv4(const struct rte_flow_item *item, + const void *default_mask, + void *data) +{ + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *mask = item->mask; + struct mlx5_flow *flow = (struct mlx5_flow *)data; + struct ibv_exp_flow_spec_ipv4_ext *ipv4; + unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext); + + ++flow->ibv_attr->num_of_specs; + flow->ibv_attr->priority = 1; + flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | + IBV_EXP_RX_HASH_DST_IPV4); + ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + *ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) { + .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT, + .size = ipv4_size, + }; + if (!spec) + return 0; + if (!mask) + mask = default_mask; + ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){ + .src_ip = spec->hdr.src_addr, + .dst_ip = spec->hdr.dst_addr, + .proto = spec->hdr.next_proto_id, + .tos = spec->hdr.type_of_service, + }; + ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){ + .src_ip = mask->hdr.src_addr, + .dst_ip = mask->hdr.dst_addr, + .proto = mask->hdr.next_proto_id, + .tos = mask->hdr.type_of_service, + }; + /* Remove unwanted bits from values. */ + ipv4->val.src_ip &= ipv4->mask.src_ip; + ipv4->val.dst_ip &= ipv4->mask.dst_ip; + ipv4->val.proto &= ipv4->mask.proto; + ipv4->val.tos &= ipv4->mask.tos; + return 0; +} + +/** + * Convert IPv6 item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. + */ +static int +mlx5_flow_create_ipv6(const struct rte_flow_item *item, + const void *default_mask, + void *data) +{ + const struct rte_flow_item_ipv6 *spec = item->spec; + const struct rte_flow_item_ipv6 *mask = item->mask; + struct mlx5_flow *flow = (struct mlx5_flow *)data; + struct ibv_exp_flow_spec_ipv6_ext *ipv6; + unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext); + unsigned int i; + + ++flow->ibv_attr->num_of_specs; + flow->ibv_attr->priority = 1; + flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 | + IBV_EXP_RX_HASH_DST_IPV6); + ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + *ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) { + .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT, + .size = ipv6_size, + }; + if (!spec) + return 0; + if (!mask) + mask = default_mask; + memcpy(ipv6->val.src_ip, spec->hdr.src_addr, + RTE_DIM(ipv6->val.src_ip)); + memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr, + RTE_DIM(ipv6->val.dst_ip)); + memcpy(ipv6->mask.src_ip, mask->hdr.src_addr, + RTE_DIM(ipv6->mask.src_ip)); + memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr, + RTE_DIM(ipv6->mask.dst_ip)); + ipv6->mask.flow_label = mask->hdr.vtc_flow; + ipv6->mask.next_hdr = mask->hdr.proto; + ipv6->mask.hop_limit = mask->hdr.hop_limits; + /* Remove unwanted bits from values. */ + for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) { + ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i]; + ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i]; + } + ipv6->val.flow_label &= ipv6->mask.flow_label; + ipv6->val.next_hdr &= ipv6->mask.next_hdr; + ipv6->val.hop_limit &= ipv6->mask.hop_limit; + return 0; +} + +/** + * Convert UDP item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. 
+ */ +static int +mlx5_flow_create_udp(const struct rte_flow_item *item, + const void *default_mask, + void *data) +{ + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = item->mask; + struct mlx5_flow *flow = (struct mlx5_flow *)data; + struct ibv_exp_flow_spec_tcp_udp *udp; + unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp); + + ++flow->ibv_attr->num_of_specs; + flow->ibv_attr->priority = 0; + flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP | + IBV_EXP_RX_HASH_DST_PORT_UDP); + udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + *udp = (struct ibv_exp_flow_spec_tcp_udp) { + .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP, + .size = udp_size, + }; + if (!spec) + return 0; + if (!mask) + mask = default_mask; + udp->val.dst_port = spec->hdr.dst_port; + udp->val.src_port = spec->hdr.src_port; + udp->mask.dst_port = mask->hdr.dst_port; + udp->mask.src_port = mask->hdr.src_port; + /* Remove unwanted bits from values. */ + udp->val.src_port &= udp->mask.src_port; + udp->val.dst_port &= udp->mask.dst_port; + return 0; +} + +/** + * Convert TCP item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. + */ +static int +mlx5_flow_create_tcp(const struct rte_flow_item *item, + const void *default_mask, + void *data) +{ + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = item->mask; + struct mlx5_flow *flow = (struct mlx5_flow *)data; + struct ibv_exp_flow_spec_tcp_udp *tcp; + unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp); + + ++flow->ibv_attr->num_of_specs; + flow->ibv_attr->priority = 0; + flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP | + IBV_EXP_RX_HASH_DST_PORT_TCP); + tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + *tcp = (struct ibv_exp_flow_spec_tcp_udp) { + .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP, + .size = tcp_size, + }; + if (!spec) + return 0; + if (!mask) + mask = default_mask; + tcp->val.dst_port = spec->hdr.dst_port; + tcp->val.src_port = spec->hdr.src_port; + tcp->mask.dst_port = mask->hdr.dst_port; + tcp->mask.src_port = mask->hdr.src_port; + /* Remove unwanted bits from values. */ + tcp->val.src_port &= tcp->mask.src_port; + tcp->val.dst_port &= tcp->mask.dst_port; + return 0; +} + +/** + * Convert VXLAN item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. 
+ */ +static int +mlx5_flow_create_vxlan(const struct rte_flow_item *item, + const void *default_mask, + void *data) +{ + const struct rte_flow_item_vxlan *spec = item->spec; + const struct rte_flow_item_vxlan *mask = item->mask; + struct mlx5_flow *flow = (struct mlx5_flow *)data; + struct ibv_exp_flow_spec_tunnel *vxlan; + unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel); + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id; + + ++flow->ibv_attr->num_of_specs; + flow->ibv_attr->priority = 0; + id.vni[0] = 0; + vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + *vxlan = (struct ibv_exp_flow_spec_tunnel) { + .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; + flow->inner = IBV_EXP_FLOW_SPEC_INNER; + if (!spec) + return 0; + if (!mask) + mask = default_mask; + memcpy(&id.vni[1], spec->vni, 3); + vxlan->val.tunnel_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vxlan->mask.tunnel_id = id.vlan_id; + /* Remove unwanted bits from values. */ + vxlan->val.tunnel_id &= vxlan->mask.tunnel_id; + return 0; +} + +/** + * Convert mark/flag action to Verbs specification. + * + * @param flow + * Pointer to MLX5 flow structure. + * @param mark_id + * Mark identifier. + */ +static int +mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id) +{ + struct ibv_exp_flow_spec_action_tag *tag; + unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag); + + tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset); + *tag = (struct ibv_exp_flow_spec_action_tag){ + .type = IBV_EXP_FLOW_SPEC_ACTION_TAG, + .size = size, + .tag_id = mlx5_flow_mark_set(mark_id), + }; + ++flow->ibv_attr->num_of_specs; + return 0; +} + +/** + * Complete flow rule creation with a drop queue. + * + * @param priv + * Pointer to private structure. + * @param flow + * MLX5 flow attributes (filled by mlx5_flow_validate()). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A flow if the rule could be created. + */ +static struct rte_flow * +priv_flow_create_action_queue_drop(struct priv *priv, + struct mlx5_flow *flow, + struct rte_flow_error *error) +{ + struct rte_flow *rte_flow; + + assert(priv->pd); + assert(priv->ctx); + rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0); + if (!rte_flow) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "cannot allocate flow memory"); + return NULL; + } + rte_flow->drop = 1; + rte_flow->ibv_attr = flow->ibv_attr; + rte_flow->qp = priv->flow_drop_queue->qp; + if (!priv->started) + return rte_flow; + rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp, + rte_flow->ibv_attr); + if (!rte_flow->ibv_flow) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "flow rule creation failure"); + goto error; + } + return rte_flow; +error: + assert(rte_flow); + rte_free(rte_flow); + return NULL; +} + +/** + * Complete flow rule creation. + * + * @param priv + * Pointer to private structure. + * @param flow + * MLX5 flow attributes (filled by mlx5_flow_validate()). + * @param action + * Target action structure. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A flow if the rule could be created. 
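
mlx5_flow_create_vxlan() above packs the 24-bit VNI into the 32-bit tunnel_id field by clearing the first byte of a 4-byte union and copying the three VNI bytes behind it, for both the value and the mask. A self-contained sketch of that packing (plain C, no Verbs types; the printed value is endian-dependent, exactly as in the PMD):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 3-byte VXLAN VNI into a 32-bit tunnel id, byte 0 left clear. */
static uint32_t
vni_to_tunnel_id(const uint8_t vni[3])
{
	union {
		uint32_t tunnel_id;
		uint8_t bytes[4];
	} id;

	id.bytes[0] = 0;
	memcpy(&id.bytes[1], vni, 3);
	return id.tunnel_id;
}

int
main(void)
{
	const uint8_t vni[3] = { 0x12, 0x34, 0x56 };

	printf("tunnel_id %#x\n", (unsigned int)vni_to_tunnel_id(vni));
	return 0;
}
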
+ */ +static struct rte_flow * +priv_flow_create_action_queue(struct priv *priv, + struct mlx5_flow *flow, + struct mlx5_flow_action *action, + struct rte_flow_error *error) +{ + struct rte_flow *rte_flow; + unsigned int i; + unsigned int j; + const unsigned int wqs_n = 1 << log2above(action->queues_n); + struct ibv_exp_wq *wqs[wqs_n]; + + assert(priv->pd); + assert(priv->ctx); + assert(!action->drop); + rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow) + + sizeof(*rte_flow->rxqs) * action->queues_n, 0); + if (!rte_flow) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "cannot allocate flow memory"); + return NULL; + } + for (i = 0; i < action->queues_n; ++i) { + struct rxq_ctrl *rxq; + + rxq = container_of((*priv->rxqs)[action->queues[i]], + struct rxq_ctrl, rxq); + wqs[i] = rxq->wq; + rte_flow->rxqs[i] = &rxq->rxq; + ++rte_flow->rxqs_n; + rxq->rxq.mark |= action->mark; + } + /* finalise indirection table. */ + for (j = 0; i < wqs_n; ++i, ++j) { + wqs[i] = wqs[j]; + if (j == action->queues_n) + j = 0; + } + rte_flow->mark = action->mark; + rte_flow->ibv_attr = flow->ibv_attr; + rte_flow->hash_fields = flow->hash_fields; + rte_flow->ind_table = ibv_exp_create_rwq_ind_table( + priv->ctx, + &(struct ibv_exp_rwq_ind_table_init_attr){ + .pd = priv->pd, + .log_ind_tbl_size = log2above(action->queues_n), + .ind_tbl = wqs, + .comp_mask = 0, + }); + if (!rte_flow->ind_table) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "cannot allocate indirection table"); + goto error; + } + rte_flow->qp = ibv_exp_create_qp( + priv->ctx, + &(struct ibv_exp_qp_init_attr){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_PORT | + IBV_EXP_QP_INIT_ATTR_RX_HASH, + .pd = priv->pd, + .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){ + .rx_hash_function = + IBV_EXP_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = rss_hash_default_key_len, + .rx_hash_key = rss_hash_default_key, + .rx_hash_fields_mask = rte_flow->hash_fields, + .rwq_ind_tbl = rte_flow->ind_table, + }, + .port_num = priv->port, + }); + if (!rte_flow->qp) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "cannot allocate QP"); + goto error; + } + if (!priv->started) + return rte_flow; + rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp, + rte_flow->ibv_attr); + if (!rte_flow->ibv_flow) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "flow rule creation failure"); + goto error; + } + return rte_flow; +error: + assert(rte_flow); + if (rte_flow->qp) + ibv_destroy_qp(rte_flow->qp); + if (rte_flow->ind_table) + ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table); + rte_free(rte_flow); + return NULL; +} + +/** + * Convert a flow. + * + * @param priv + * Pointer to private structure. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A flow on success, NULL otherwise. 
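
priv_flow_create_action_queue() must hand Verbs an indirection table whose size is a power of two, so the number of configured queues is rounded up with log2above() and the tail of the WQ array is filled by repeating the configured queues. A minimal sketch of that round-up-and-repeat step (log2above() is reimplemented here for the example, and the wrap uses a modulo for brevity):

#include <stdio.h>

/* Smallest n such that (1 << n) >= v, for v >= 1. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int n = 0;

	while ((1u << n) < v)
		++n;
	return n;
}

int
main(void)
{
	const unsigned int queues[] = { 3, 5, 6 };
	const unsigned int queues_n = 3;
	const unsigned int tbl_n = 1u << log2above(queues_n); /* 4 */
	unsigned int tbl[16];
	unsigned int i;

	/* Fill the power-of-two table by cycling over the real queues. */
	for (i = 0; i != tbl_n; ++i)
		tbl[i] = queues[i % queues_n];
	for (i = 0; i != tbl_n; ++i)
		printf("tbl[%u] = %u\n", i, tbl[i]); /* 3 5 6 3 */
	return 0;
}
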
+ */ +static struct rte_flow * +priv_flow_create(struct priv *priv, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow *rte_flow; + struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), }; + struct mlx5_flow_action action = { + .queue = 0, + .drop = 0, + .mark = 0, + .mark_id = MLX5_FLOW_MARK_DEFAULT, + .queues_n = 0, + }; + int err; + + err = priv_flow_validate(priv, attr, items, actions, error, &flow, + &action); + if (err) + goto exit; + flow.ibv_attr = rte_malloc(__func__, flow.offset, 0); + flow.offset = sizeof(struct ibv_exp_flow_attr); + if (!flow.ibv_attr) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "cannot allocate ibv_attr memory"); + goto exit; + } + *flow.ibv_attr = (struct ibv_exp_flow_attr){ + .type = IBV_EXP_FLOW_ATTR_NORMAL, + .size = sizeof(struct ibv_exp_flow_attr), + .priority = attr->priority, + .num_of_specs = 0, + .port = 0, + .flags = 0, + .reserved = 0, + }; + flow.inner = 0; + flow.hash_fields = 0; + claim_zero(priv_flow_validate(priv, attr, items, actions, + error, &flow, &action)); + if (action.mark && !action.drop) { + mlx5_flow_create_flag_mark(&flow, action.mark_id); + flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag); + } + if (action.drop) + rte_flow = + priv_flow_create_action_queue_drop(priv, &flow, error); + else + rte_flow = priv_flow_create_action_queue(priv, &flow, &action, + error); + if (!rte_flow) + goto exit; + return rte_flow; +exit: + rte_free(flow.ibv_attr); + return NULL; +} + +/** + * Create a flow. + * + * @see rte_flow_create() + * @see rte_flow_ops + */ +struct rte_flow * +mlx5_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + struct rte_flow *flow; + + priv_lock(priv); + flow = priv_flow_create(priv, attr, items, actions, error); + if (flow) { + LIST_INSERT_HEAD(&priv->flows, flow, next); + DEBUG("Flow created %p", (void *)flow); + } + priv_unlock(priv); + return flow; +} + +/** + * Destroy a flow. + * + * @param priv + * Pointer to private structure. + * @param[in] flow + * Flow to destroy. + */ +static void +priv_flow_destroy(struct priv *priv, + struct rte_flow *flow) +{ + (void)priv; + LIST_REMOVE(flow, next); + if (flow->ibv_flow) + claim_zero(ibv_exp_destroy_flow(flow->ibv_flow)); + if (flow->drop) + goto free; + if (flow->qp) + claim_zero(ibv_destroy_qp(flow->qp)); + if (flow->ind_table) + claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table)); + if (flow->drop && flow->wq) + claim_zero(ibv_exp_destroy_wq(flow->wq)); + if (flow->drop && flow->cq) + claim_zero(ibv_destroy_cq(flow->cq)); + if (flow->mark) { + struct rte_flow *tmp; + struct rxq *rxq; + uint32_t mark_n = 0; + uint32_t queue_n; + + /* + * To remove the mark from the queue, the queue must not be + * present in any other marked flow (RSS or not). 
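
priv_flow_create() above works in two passes: the first priv_flow_validate() call appears to only accumulate the space the specifications will need in flow.offset, the ibv_attr buffer is then allocated with exactly that size, and a second pass (expected to succeed, hence claim_zero()) writes the specs into it. A rough sketch of that measure-then-fill pattern with hypothetical fixed-size items:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical items, each contributing a fixed-size specification. */
static const size_t spec_sizes[] = { 16, 40, 8 };
#define N_ITEMS (sizeof(spec_sizes) / sizeof(spec_sizes[0]))
#define HDR_SIZE 64 /* stand-in for the flow attribute header */

int
main(void)
{
	size_t offset = HDR_SIZE;
	size_t i;
	char *attr;

	/* Pass 1: measure only. */
	for (i = 0; i != N_ITEMS; ++i)
		offset += spec_sizes[i];
	attr = calloc(1, offset);
	if (attr == NULL)
		return 1;
	/* Pass 2: fill, restarting right after the header. */
	offset = HDR_SIZE;
	for (i = 0; i != N_ITEMS; ++i) {
		memset(attr + offset, 0xff, spec_sizes[i]); /* "write" spec */
		offset += spec_sizes[i];
	}
	printf("attribute buffer: %zu bytes, %zu specs\n", offset,
	       (size_t)N_ITEMS);
	free(attr);
	return 0;
}
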
+ */ + for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) { + rxq = flow->rxqs[queue_n]; + for (tmp = LIST_FIRST(&priv->flows); + tmp; + tmp = LIST_NEXT(tmp, next)) { + uint32_t tqueue_n; + + if (tmp->drop) + continue; + for (tqueue_n = 0; + tqueue_n < tmp->rxqs_n; + ++tqueue_n) { + struct rxq *trxq; + + trxq = tmp->rxqs[tqueue_n]; + if (rxq == trxq) + ++mark_n; + } + } + rxq->mark = !!mark_n; + } + } +free: + rte_free(flow->ibv_attr); + DEBUG("Flow destroyed %p", (void *)flow); + rte_free(flow); +} + +/** + * Destroy a flow. + * + * @see rte_flow_destroy() + * @see rte_flow_ops + */ +int +mlx5_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + + (void)error; + priv_lock(priv); + priv_flow_destroy(priv, flow); + priv_unlock(priv); + return 0; +} + +/** + * Destroy all flows. + * + * @param priv + * Pointer to private structure. + */ +static void +priv_flow_flush(struct priv *priv) +{ + while (!LIST_EMPTY(&priv->flows)) { + struct rte_flow *flow; + + flow = LIST_FIRST(&priv->flows); + priv_flow_destroy(priv, flow); + } +} + +/** + * Destroy all flows. + * + * @see rte_flow_flush() + * @see rte_flow_ops + */ +int +mlx5_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct priv *priv = dev->data->dev_private; + + (void)error; + priv_lock(priv); + priv_flow_flush(priv); + priv_unlock(priv); + return 0; +} + +/** + * Create drop queue. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success. + */ +static int +priv_flow_create_drop_queue(struct priv *priv) +{ + struct rte_flow_drop *fdq = NULL; + unsigned int i; + + assert(priv->pd); + assert(priv->ctx); + fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0); + if (!fdq) { + WARN("cannot allocate memory for drop queue"); + goto error; + } + fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0, + &(struct ibv_exp_cq_init_attr){ + .comp_mask = 0, + }); + if (!fdq->cq) { + WARN("cannot allocate CQ for drop queue"); + goto error; + } + for (i = 0; i != MLX5_DROP_WQ_N; ++i) { + fdq->wqs[i] = ibv_exp_create_wq(priv->ctx, + &(struct ibv_exp_wq_init_attr){ + .wq_type = IBV_EXP_WQT_RQ, + .max_recv_wr = 1, + .max_recv_sge = 1, + .pd = priv->pd, + .cq = fdq->cq, + }); + if (!fdq->wqs[i]) { + WARN("cannot allocate WQ for drop queue"); + goto error; + } + } + fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx, + &(struct ibv_exp_rwq_ind_table_init_attr){ + .pd = priv->pd, + .log_ind_tbl_size = 0, + .ind_tbl = fdq->wqs, + .comp_mask = 0, + }); + if (!fdq->ind_table) { + WARN("cannot allocate indirection table for drop queue"); + goto error; + } + fdq->qp = ibv_exp_create_qp(priv->ctx, + &(struct ibv_exp_qp_init_attr){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_PORT | + IBV_EXP_QP_INIT_ATTR_RX_HASH, + .pd = priv->pd, + .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){ + .rx_hash_function = + IBV_EXP_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = rss_hash_default_key_len, + .rx_hash_key = rss_hash_default_key, + .rx_hash_fields_mask = 0, + .rwq_ind_tbl = fdq->ind_table, + }, + .port_num = priv->port, + }); + if (!fdq->qp) { + WARN("cannot allocate QP for drop queue"); + goto error; + } + priv->flow_drop_queue = fdq; + return 0; +error: + if (fdq->qp) + claim_zero(ibv_destroy_qp(fdq->qp)); + if (fdq->ind_table) + claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table)); + for (i = 0; i != MLX5_DROP_WQ_N; ++i) { + if (fdq->wqs[i]) + 
claim_zero(ibv_exp_destroy_wq(fdq->wqs[i])); + } + if (fdq->cq) + claim_zero(ibv_destroy_cq(fdq->cq)); + if (fdq) + rte_free(fdq); + priv->flow_drop_queue = NULL; + return -1; +} + +/** + * Delete drop queue. + * + * @param priv + * Pointer to private structure. + */ +static void +priv_flow_delete_drop_queue(struct priv *priv) +{ + struct rte_flow_drop *fdq = priv->flow_drop_queue; + unsigned int i; + + if (!fdq) + return; + if (fdq->qp) + claim_zero(ibv_destroy_qp(fdq->qp)); + if (fdq->ind_table) + claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table)); + for (i = 0; i != MLX5_DROP_WQ_N; ++i) { + if (fdq->wqs[i]) + claim_zero(ibv_exp_destroy_wq(fdq->wqs[i])); + } + if (fdq->cq) + claim_zero(ibv_destroy_cq(fdq->cq)); + rte_free(fdq); + priv->flow_drop_queue = NULL; +} + +/** + * Remove all flows. + * + * Called by dev_stop() to remove all flows. + * + * @param priv + * Pointer to private structure. + */ +void +priv_flow_stop(struct priv *priv) +{ + struct rte_flow *flow; + + for (flow = LIST_FIRST(&priv->flows); + flow; + flow = LIST_NEXT(flow, next)) { + claim_zero(ibv_exp_destroy_flow(flow->ibv_flow)); + flow->ibv_flow = NULL; + if (flow->mark) { + unsigned int n; + + for (n = 0; n < flow->rxqs_n; ++n) + flow->rxqs[n]->mark = 0; + } + DEBUG("Flow %p removed", (void *)flow); + } + priv_flow_delete_drop_queue(priv); +} + +/** + * Add all flows. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, a errno value otherwise and rte_errno is set. + */ +int +priv_flow_start(struct priv *priv) +{ + int ret; + struct rte_flow *flow; + + ret = priv_flow_create_drop_queue(priv); + if (ret) + return -1; + for (flow = LIST_FIRST(&priv->flows); + flow; + flow = LIST_NEXT(flow, next)) { + struct ibv_qp *qp; + + if (flow->drop) + qp = priv->flow_drop_queue->qp; + else + qp = flow->qp; + flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr); + if (!flow->ibv_flow) { + DEBUG("Flow %p cannot be applied", (void *)flow); + rte_errno = EINVAL; + return rte_errno; + } + DEBUG("Flow %p applied", (void *)flow); + if (flow->mark) { + unsigned int n; + + for (n = 0; n < flow->rxqs_n; ++n) + flow->rxqs[n]->mark = 1; + } + } + return 0; +} + +/** + * Verify if the Rx queue is used in a flow. + * + * @param priv + * Pointer to private structure. + * @param rxq + * Pointer to the queue to search. + * + * @return + * Nonzero if the queue is used by a flow. + */ +int +priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq) +{ + struct rte_flow *flow; + + for (flow = LIST_FIRST(&priv->flows); + flow; + flow = LIST_NEXT(flow, next)) { + unsigned int n; + + if (flow->drop) + continue; + for (n = 0; n < flow->rxqs_n; ++n) { + if (flow->rxqs[n] == rxq) + return 1; + } + } + return 0; +} diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 4fcfd3b8..79e0c410 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -470,26 +470,30 @@ priv_mac_addrs_enable(struct priv *priv) * @param vmdq * VMDq pool index to associate address with (ignored). 
*/ -void +int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t vmdq) { struct priv *priv = dev->data->dev_private; + int re; if (mlx5_is_secondary()) - return; + return -ENOTSUP; (void)vmdq; priv_lock(priv); DEBUG("%p: adding MAC address at index %" PRIu32, (void *)dev, index); - if (index >= RTE_DIM(priv->mac)) + if (index >= RTE_DIM(priv->mac)) { + re = EINVAL; goto end; - priv_mac_addr_add(priv, index, - (const uint8_t (*)[ETHER_ADDR_LEN]) - mac_addr->addr_bytes); + } + re = priv_mac_addr_add(priv, index, + (const uint8_t (*)[ETHER_ADDR_LEN]) + mac_addr->addr_bytes); end: priv_unlock(priv); + return -re; } /** diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index ed088eea..608072f7 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -34,6 +34,8 @@ #ifndef RTE_PMD_MLX5_PRM_H_ #define RTE_PMD_MLX5_PRM_H_ +#include <assert.h> + /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ #ifdef PEDANTIC @@ -44,6 +46,7 @@ #pragma GCC diagnostic error "-Wpedantic" #endif +#include <rte_vect.h> #include "mlx5_autoconf.h" /* Get CQE owner bit. */ @@ -70,6 +73,9 @@ /* WQE size */ #define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE) +/* Max size of a WQE session. */ +#define MLX5_WQE_SIZE_MAX 960U + /* Compute the number of DS. */ #define MLX5_WQE_DS(n) \ (((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE) @@ -77,10 +83,19 @@ /* Room for inline data in multi-packet WQE. */ #define MLX5_MWQE64_INL_DATA 28 +/* Default minimum number of Tx queues for inlining packets. */ +#define MLX5_EMPW_MIN_TXQS 8 + +/* Default max packet length to be inlined. */ +#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) + #ifndef HAVE_VERBS_MLX5_OPCODE_TSO #define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ #endif +#define MLX5_OPC_MOD_ENHANCED_MPSW 0 +#define MLX5_OPCODE_ENHANCED_MPSW 0x29 + /* CQE value to inform that VLAN is stripped. */ #define MLX5_CQE_VLAN_STRIPPED (1u << 0) @@ -117,6 +132,28 @@ /* Tunnel packet bit in the CQE. */ #define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0) +/* Inner L3 checksum offload (Tunneled packets only). */ +#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4) + +/* Inner L4 checksum offload (Tunneled packets only). */ +#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5) + +/* Is flow mark valid. */ +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00) +#else +#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff) +#endif + +/* INVALID is used by packets matching no flow rules. */ +#define MLX5_FLOW_MARK_INVALID 0 + +/* Maximum allowed value to mark a packet. */ +#define MLX5_FLOW_MARK_MAX 0xfffff0 + +/* Default mark value used when none is provided. */ +#define MLX5_FLOW_MARK_DEFAULT 0xffffff + /* Subset of struct mlx5_wqe_eth_seg. */ struct mlx5_wqe_eth_seg_small { uint32_t rsvd0; @@ -126,12 +163,19 @@ struct mlx5_wqe_eth_seg_small { uint32_t rsvd2; uint16_t inline_hdr_sz; uint8_t inline_hdr[2]; -}; +} __rte_aligned(MLX5_WQE_DWORD_SIZE); struct mlx5_wqe_inl_small { uint32_t byte_cnt; uint8_t raw; -}; +} __rte_aligned(MLX5_WQE_DWORD_SIZE); + +struct mlx5_wqe_ctrl { + uint32_t ctrl0; + uint32_t ctrl1; + uint32_t ctrl2; + uint32_t ctrl3; +} __rte_aligned(MLX5_WQE_DWORD_SIZE); /* Small common part of the WQE. */ struct mlx5_wqe { @@ -139,16 +183,30 @@ struct mlx5_wqe { struct mlx5_wqe_eth_seg_small eseg; }; +/* Vectorize WQE header. 
*/ +struct mlx5_wqe_v { + rte_v128u32_t ctrl; + rte_v128u32_t eseg; +}; + /* WQE. */ struct mlx5_wqe64 { struct mlx5_wqe hdr; uint8_t raw[32]; -} __rte_aligned(64); +} __rte_aligned(MLX5_WQE_SIZE); + +/* MPW mode. */ +enum mlx5_mpw_mode { + MLX5_MPW_DISABLED, + MLX5_MPW, + MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */ +}; /* MPW session status. */ enum mlx5_mpw_state { MLX5_MPW_STATE_OPENED, MLX5_MPW_INL_STATE_OPENED, + MLX5_MPW_ENHANCED_STATE_OPENED, MLX5_MPW_STATE_CLOSED, }; @@ -180,10 +238,68 @@ struct mlx5_cqe { uint8_t rsvd2[12]; uint32_t byte_cnt; uint64_t timestamp; - uint8_t rsvd3[4]; + uint32_t sop_drop_qpn; uint16_t wqe_counter; uint8_t rsvd4; uint8_t op_own; }; +/** + * Convert a user mark to flow mark. + * + * @param val + * Mark value to convert. + * + * @return + * Converted mark value. + */ +static inline uint32_t +mlx5_flow_mark_set(uint32_t val) +{ + uint32_t ret; + + /* + * Add one to the user value to differentiate un-marked flows from + * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it + * remains untouched. + */ + if (val != MLX5_FLOW_MARK_DEFAULT) + ++val; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + /* + * Mark is 24 bits (minus reserved values) but is stored on a 32 bit + * word, byte-swapped by the kernel on little-endian systems. In this + * case, left-shifting the resulting big-endian value ensures the + * least significant 24 bits are retained when converting it back. + */ + ret = rte_cpu_to_be_32(val) >> 8; +#else + ret = val; +#endif + return ret; +} + +/** + * Convert a mark to user mark. + * + * @param val + * Mark value to convert. + * + * @return + * Converted mark value. + */ +static inline uint32_t +mlx5_flow_mark_get(uint32_t val) +{ + /* + * Subtract one from the retrieved value. It was added by + * mlx5_flow_mark_set() to distinguish unmarked flows. + */ +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + return (val >> 8) - 1; +#else + return val - 1; +#endif +} + #endif /* RTE_PMD_MLX5_PRM_H_ */ diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index 0bed74ee..a2dd7d17 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -257,13 +257,9 @@ priv_dev_rss_reta_query(struct priv *priv, { unsigned int idx; unsigned int i; - int ret; - - /* See RETA comment in mlx5_dev_infos_get(). */ - ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size); - if (ret) - return ret; + if (!reta_size || reta_size > priv->reta_idx_n) + return EINVAL; /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -296,8 +292,9 @@ priv_dev_rss_reta_update(struct priv *priv, unsigned int pos; int ret; - /* See RETA comment in mlx5_dev_infos_get(). */ - ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size); + if (!reta_size) + return EINVAL; + ret = priv_rss_reta_index_resize(priv, reta_size); if (ret) return ret; @@ -360,8 +357,11 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, int ret; struct priv *priv = dev->data->dev_private; + mlx5_dev_stop(dev); priv_lock(priv); ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size); priv_unlock(priv); - return -ret; + if (ret) + return -ret; + return mlx5_dev_start(dev); } diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 28e93d3e..8b782336 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -36,6 +36,7 @@ #include <errno.h> #include <string.h> #include <stdint.h> +#include <fcntl.h> /* Verbs header. 
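
The two mark helpers above keep value 0 (MLX5_FLOW_MARK_INVALID) free for packets that match no rule by shifting user marks up by one on the way into the hardware and back down on the way out; on little-endian hosts an extra byte swap keeps the 24 significant bits in place. A tiny sketch of just the +1/-1 reservation, leaving the endianness handling aside:

#include <stdint.h>
#include <stdio.h>

#define MARK_INVALID 0        /* packets matching no rule */
#define MARK_DEFAULT 0xffffff /* "flag" action: mark with no explicit id */

static uint32_t
mark_encode(uint32_t user)
{
	return (user != MARK_DEFAULT) ? user + 1 : user;
}

static uint32_t
mark_decode(uint32_t hw)
{
	return hw - 1;
}

int
main(void)
{
	uint32_t user = 42;
	uint32_t hw = mark_encode(user);

	printf("user %u -> hw %u -> user %u\n", (unsigned int)user,
	       (unsigned int)hw, (unsigned int)mark_decode(hw));
	printf("hw value %u stays reserved for unmatched packets\n",
	       (unsigned int)MARK_INVALID);
	return 0;
}
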
*/ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ @@ -57,6 +58,8 @@ #include <rte_malloc.h> #include <rte_ethdev.h> #include <rte_common.h> +#include <rte_interrupts.h> +#include <rte_debug.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-Wpedantic" #endif @@ -741,49 +744,16 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl) void rxq_cleanup(struct rxq_ctrl *rxq_ctrl) { - struct ibv_exp_release_intf_params params; - DEBUG("cleaning up %p", (void *)rxq_ctrl); rxq_free_elts(rxq_ctrl); if (rxq_ctrl->fdir_queue != NULL) priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue); - if (rxq_ctrl->if_wq != NULL) { - assert(rxq_ctrl->priv != NULL); - assert(rxq_ctrl->priv->ctx != NULL); - assert(rxq_ctrl->wq != NULL); - params = (struct ibv_exp_release_intf_params){ - .comp_mask = 0, - }; - claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx, - rxq_ctrl->if_wq, - ¶ms)); - } - if (rxq_ctrl->if_cq != NULL) { - assert(rxq_ctrl->priv != NULL); - assert(rxq_ctrl->priv->ctx != NULL); - assert(rxq_ctrl->cq != NULL); - params = (struct ibv_exp_release_intf_params){ - .comp_mask = 0, - }; - claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx, - rxq_ctrl->if_cq, - ¶ms)); - } if (rxq_ctrl->wq != NULL) claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq)); if (rxq_ctrl->cq != NULL) claim_zero(ibv_destroy_cq(rxq_ctrl->cq)); - if (rxq_ctrl->rd != NULL) { - struct ibv_exp_destroy_res_domain_attr attr = { - .comp_mask = 0, - }; - - assert(rxq_ctrl->priv != NULL); - assert(rxq_ctrl->priv->ctx != NULL); - claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->priv->ctx, - rxq_ctrl->rd, - &attr)); - } + if (rxq_ctrl->channel != NULL) + claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel)); if (rxq_ctrl->mr != NULL) claim_zero(ibv_dereg_mr(rxq_ctrl->mr)); memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); @@ -931,13 +901,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, }; struct ibv_exp_wq_attr mod; union { - struct ibv_exp_query_intf_params params; struct ibv_exp_cq_init_attr cq; - struct ibv_exp_res_domain_init_attr rd; struct ibv_exp_wq_init_attr wq; struct ibv_exp_cq_attr cq_attr; } attr; - enum ibv_exp_query_intf_status status; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); unsigned int cqe_n = desc - 1; struct rte_mbuf *(*elts)[desc] = NULL; @@ -946,14 +913,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, (void)conf; /* Thresholds configuration (ignored). */ /* Enable scattered packets support for this queue if necessary. */ assert(mb_len >= RTE_PKTMBUF_HEADROOM); - /* If smaller than MRU, multi-segment support must be enabled. */ - if (mb_len < (priv->mtu > dev->data->dev_conf.rxmode.max_rx_pkt_len ? 
- dev->data->dev_conf.rxmode.max_rx_pkt_len : - priv->mtu)) - dev->data->dev_conf.rxmode.jumbo_frame = 1; - if ((dev->data->dev_conf.rxmode.jumbo_frame) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len > - (mb_len - RTE_PKTMBUF_HEADROOM))) { + if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= + (mb_len - RTE_PKTMBUF_HEADROOM)) { + tmpl.rxq.sges_n = 0; + } else if (dev->data->dev_conf.rxmode.enable_scatter) { unsigned int size = RTE_PKTMBUF_HEADROOM + dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -976,6 +939,13 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, dev->data->dev_conf.rxmode.max_rx_pkt_len); return EOVERFLOW; } + } else { + WARN("%p: the requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered" + " mode has not been requested", + (void *)dev, + dev->data->dev_conf.rxmode.max_rx_pkt_len, + mb_len - RTE_PKTMBUF_HEADROOM); } DEBUG("%p: maximum number of segments per packet: %u", (void *)dev, 1 << tmpl.rxq.sges_n); @@ -1001,29 +971,25 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, (void *)dev, strerror(ret)); goto error; } - attr.rd = (struct ibv_exp_res_domain_init_attr){ - .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | - IBV_EXP_RES_DOMAIN_MSG_MODEL), - .thread_model = IBV_EXP_THREAD_SINGLE, - .msg_model = IBV_EXP_MSG_HIGH_BW, - }; - tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); - if (tmpl.rd == NULL) { - ret = ENOMEM; - ERROR("%p: RD creation failure: %s", - (void *)dev, strerror(ret)); - goto error; + if (dev->data->dev_conf.intr_conf.rxq) { + tmpl.channel = ibv_create_comp_channel(priv->ctx); + if (tmpl.channel == NULL) { + dev->data->dev_conf.intr_conf.rxq = 0; + ret = ENOMEM; + ERROR("%p: Comp Channel creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } } attr.cq = (struct ibv_exp_cq_init_attr){ - .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, - .res_domain = tmpl.rd, + .comp_mask = 0, }; if (priv->cqe_comp) { attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS; attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE; cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */ } - tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, NULL, 0, + tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0, &attr.cq); if (tmpl.cq == NULL) { ret = ENOMEM; @@ -1048,10 +1014,8 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, .pd = priv->pd, .cq = tmpl.cq, .comp_mask = - IBV_EXP_CREATE_WQ_RES_DOMAIN | IBV_EXP_CREATE_WQ_VLAN_OFFLOADS | 0, - .res_domain = tmpl.rd, .vlan_offloads = (tmpl.rxq.vlan_strip ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0), @@ -1112,29 +1076,6 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, /* Save port ID. */ tmpl.rxq.port_id = dev->data->port_id; DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id); - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf_version = 1, - .intf = IBV_EXP_INTF_CQ, - .obj = tmpl.cq, - }; - tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_cq == NULL) { - ERROR("%p: CQ interface family query failed with status %d", - (void *)dev, status); - goto error; - } - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_WQ, - .obj = tmpl.wq, - }; - tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_wq == NULL) { - ERROR("%p: WQ interface family query failed with status %d", - (void *)dev, status); - goto error; - } /* Change queue state to ready. 
*/ mod = (struct ibv_exp_wq_attr){ .attr_mask = IBV_EXP_WQ_ATTR_STATE, @@ -1247,6 +1188,19 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } (*priv->rxqs)[idx] = NULL; rxq_cleanup(rxq_ctrl); + /* Resize if rxq size is changed. */ + if (rxq_ctrl->rxq.elts_n != log2above(desc)) { + rxq_ctrl = rte_realloc(rxq_ctrl, + sizeof(*rxq_ctrl) + + desc * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE); + if (!rxq_ctrl) { + ERROR("%p: unable to reallocate queue index %u", + (void *)dev, idx); + priv_unlock(priv); + return -ENOMEM; + } + } } else { rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) + desc * sizeof(struct rte_mbuf *), @@ -1295,6 +1249,9 @@ mlx5_rx_queue_release(void *dpdk_rxq) rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); priv = rxq_ctrl->priv; priv_lock(priv); + if (priv_flow_rxq_in_use(priv, rxq)) + rte_panic("Rx queue %p is still used by a flow and cannot be" + " removed\n", (void *)rxq_ctrl); for (i = 0; (i != priv->rxqs_n); ++i) if ((*priv->rxqs)[i] == rxq) { DEBUG("%p: removing RX queue %p from list", @@ -1347,3 +1304,113 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts, rxq = (*priv->rxqs)[index]; return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n); } + +/** + * Fill epoll fd list for rxq interrupts. + * + * @param priv + * Private structure. + * + * @return + * 0 on success, negative on failure. + */ +int +priv_intr_efd_enable(struct priv *priv) +{ + unsigned int i; + unsigned int rxqs_n = priv->rxqs_n; + unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + + if (n == 0) + return 0; + if (n < rxqs_n) { + WARN("rxqs num is larger than EAL max interrupt vector " + "%u > %u unable to supprt rxq interrupts", + rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + return -EINVAL; + } + intr_handle->type = RTE_INTR_HANDLE_EXT; + for (i = 0; i != n; ++i) { + struct rxq *rxq = (*priv->rxqs)[i]; + struct rxq_ctrl *rxq_ctrl = + container_of(rxq, struct rxq_ctrl, rxq); + int fd = rxq_ctrl->channel->fd; + int flags; + int rc; + + flags = fcntl(fd, F_GETFL); + rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + WARN("failed to change rxq interrupt file " + "descriptor %d for queue index %d", fd, i); + return -1; + } + intr_handle->efds[i] = fd; + } + intr_handle->nb_efd = n; + return 0; +} + +/** + * Clean epoll fd list for rxq interrupts. + * + * @param priv + * Private structure. + */ +void +priv_intr_efd_disable(struct priv *priv) +{ + struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + + rte_intr_free_epoll_fd(intr_handle); +} + +/** + * Create and init interrupt vector array. + * + * @param priv + * Private structure. + * + * @return + * 0 on success, negative on failure. + */ +int +priv_create_intr_vec(struct priv *priv) +{ + unsigned int rxqs_n = priv->rxqs_n; + unsigned int i; + struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + + if (rxqs_n == 0) + return 0; + intr_handle->intr_vec = (int *) + rte_malloc("intr_vec", rxqs_n * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + WARN("Failed to allocate memory for intr_vec " + "rxq interrupt will not be supported"); + return -ENOMEM; + } + for (i = 0; i != rxqs_n; ++i) { + /* 1:1 mapping between rxq and interrupt. */ + intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i; + } + return 0; +} + +/** + * Destroy init interrupt vector array. + * + * @param priv + * Private structure. + * + * @return + * 0 on success, negative on failure. 
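
priv_intr_efd_enable() above switches every Rx completion channel file descriptor to non-blocking with fcntl() before storing it in intr_handle->efds[], so the EAL interrupt loop never blocks on a channel read. A self-contained sketch of that fcntl() step, exercised here on an ordinary pipe fd:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Switch a file descriptor to non-blocking mode, as done for each
 * completion channel fd before it is handed to the interrupt handle. */
static int
fd_set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

int
main(void)
{
	int pipefd[2];

	if (pipe(pipefd) != 0)
		return 1;
	if (fd_set_nonblock(pipefd[0]) != 0)
		perror("fcntl");
	else
		printf("fd %d is now non-blocking\n", pipefd[0]);
	close(pipefd[0]);
	close(pipefd[1]);
	return 0;
}
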
+ */ +void +priv_destroy_intr_vec(struct priv *priv) +{ + struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + + rte_free(intr_handle->intr_vec); +} diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 3997b27a..de6e0fa4 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -74,6 +74,9 @@ check_cqe(volatile struct mlx5_cqe *cqe, unsigned int cqes_n, const uint16_t ci) __attribute__((always_inline)); +static inline void +txq_complete(struct txq *txq) __attribute__((always_inline)); + static inline uint32_t txq_mp2mr(struct txq *txq, struct rte_mempool *mp) __attribute__((always_inline)); @@ -110,7 +113,7 @@ static inline int check_cqe_seen(volatile struct mlx5_cqe *cqe) { static const uint8_t magic[] = "seen"; - volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3; + volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0; int ret = 1; unsigned int i; @@ -173,8 +176,79 @@ check_cqe(volatile struct mlx5_cqe *cqe, return 0; } -static inline void -txq_complete(struct txq *txq) __attribute__((always_inline)); +/** + * Return the address of the WQE. + * + * @param txq + * Pointer to TX queue structure. + * @param wqe_ci + * WQE consumer index. + * + * @return + * WQE address. + */ +static inline uintptr_t * +tx_mlx5_wqe(struct txq *txq, uint16_t ci) +{ + ci &= ((1 << txq->wqe_n) - 1); + return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE); +} + +/** + * Return the size of tailroom of WQ. + * + * @param txq + * Pointer to TX queue structure. + * @param addr + * Pointer to tail of WQ. + * + * @return + * Size of tailroom. + */ +static inline size_t +tx_mlx5_wq_tailroom(struct txq *txq, void *addr) +{ + size_t tailroom; + tailroom = (uintptr_t)(txq->wqes) + + (1 << txq->wqe_n) * MLX5_WQE_SIZE - + (uintptr_t)addr; + return tailroom; +} + +/** + * Copy data to tailroom of circular queue. + * + * @param dst + * Pointer to destination. + * @param src + * Pointer to source. + * @param n + * Number of bytes to copy. + * @param base + * Pointer to head of queue. + * @param tailroom + * Size of tailroom from dst. + * + * @return + * Pointer after copied data. + */ +static inline void * +mlx5_copy_to_wq(void *dst, const void *src, size_t n, + void *base, size_t tailroom) +{ + void *ret; + + if (n > tailroom) { + rte_memcpy(dst, src, tailroom); + rte_memcpy(base, (void *)((uintptr_t)src + tailroom), + n - tailroom); + ret = (uint8_t *)base + n - tailroom; + } else { + rte_memcpy(dst, src, n); + ret = (n == tailroom) ? base : (uint8_t *)dst + n; + } + return ret; +} /** * Manage TX completions. @@ -194,7 +268,7 @@ txq_complete(struct txq *txq) uint16_t elts_tail; uint16_t cq_ci = txq->cq_ci; volatile struct mlx5_cqe *cqe = NULL; - volatile struct mlx5_wqe *wqe; + volatile struct mlx5_wqe_ctrl *ctrl; do { volatile struct mlx5_cqe *tmp; @@ -220,9 +294,10 @@ txq_complete(struct txq *txq) } while (1); if (unlikely(cqe == NULL)) return; - wqe = &(*txq->wqes)[ntohs(cqe->wqe_counter) & - ((1 << txq->wqe_n) - 1)].hdr; - elts_tail = wqe->ctrl[3]; + txq->wqe_pi = ntohs(cqe->wqe_counter); + ctrl = (volatile struct mlx5_wqe_ctrl *) + tx_mlx5_wqe(txq, txq->wqe_pi); + elts_tail = ctrl->ctrl3; assert(elts_tail < (1 << txq->wqe_n)); /* Free buffers. */ while (elts_free != elts_tail) { @@ -326,37 +401,79 @@ mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe) } /** - * Prefetch a CQE. + * DPDK callback to check the status of a tx descriptor. * - * @param txq - * Pointer to TX queue structure. - * @param cqe_ci - * CQE consumer index. 
+ * @param tx_queue + * The tx queue. + * @param[in] offset + * The index of the descriptor in the ring. + * + * @return + * The status of the tx descriptor. */ -static inline void -tx_prefetch_cqe(struct txq *txq, uint16_t ci) +int +mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset) { - volatile struct mlx5_cqe *cqe; + struct txq *txq = tx_queue; + const unsigned int elts_n = 1 << txq->elts_n; + const unsigned int elts_cnt = elts_n - 1; + unsigned int used; - cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)]; - rte_prefetch0(cqe); + txq_complete(txq); + used = (txq->elts_head - txq->elts_tail) & elts_cnt; + if (offset < used) + return RTE_ETH_TX_DESC_FULL; + return RTE_ETH_TX_DESC_DONE; } /** - * Prefetch a WQE. + * DPDK callback to check the status of a rx descriptor. * - * @param txq - * Pointer to TX queue structure. - * @param wqe_ci - * WQE consumer index. + * @param rx_queue + * The rx queue. + * @param[in] offset + * The index of the descriptor in the ring. + * + * @return + * The status of the tx descriptor. */ -static inline void -tx_prefetch_wqe(struct txq *txq, uint16_t ci) +int +mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) { - volatile struct mlx5_wqe64 *wqe; + struct rxq *rxq = rx_queue; + struct rxq_zip *zip = &rxq->zip; + volatile struct mlx5_cqe *cqe; + const unsigned int cqe_n = (1 << rxq->cqe_n); + const unsigned int cqe_cnt = cqe_n - 1; + unsigned int cq_ci; + unsigned int used; + + /* if we are processing a compressed cqe */ + if (zip->ai) { + used = zip->cqe_cnt - zip->ca; + cq_ci = zip->cq_ci; + } else { + used = 0; + cq_ci = rxq->cq_ci; + } + cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; + while (check_cqe(cqe, cqe_n, cq_ci) == 0) { + int8_t op_own; + unsigned int n; - wqe = &(*txq->wqes)[ci & ((1 << txq->wqe_n) - 1)]; - rte_prefetch0(wqe); + op_own = cqe->op_own; + if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) + n = ntohl(cqe->byte_cnt); + else + n = 1; + cq_ci += n; + used += n; + cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; + } + used = RTE_MIN(used, (1U << rxq->elts_n) - 1); + if (offset < used) + return RTE_ETH_RX_DESC_DONE; + return RTE_ETH_RX_DESC_AVAIL; } /** @@ -380,9 +497,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) const unsigned int elts_n = 1 << txq->elts_n; unsigned int i = 0; unsigned int j = 0; + unsigned int k = 0; unsigned int max; + unsigned int max_inline = txq->max_inline; + const unsigned int inline_en = !!max_inline && txq->inline_en; + uint16_t max_wqe; unsigned int comp; - volatile struct mlx5_wqe *wqe = NULL; + volatile struct mlx5_wqe_v *wqe = NULL; + volatile struct mlx5_wqe_ctrl *last_wqe = NULL; unsigned int segs_n = 0; struct rte_mbuf *buf = NULL; uint8_t *raw; @@ -390,25 +512,33 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!pkts_n)) return 0; /* Prefetch first packet cacheline. */ - tx_prefetch_cqe(txq, txq->cq_ci); - tx_prefetch_cqe(txq, txq->cq_ci + 1); rte_prefetch0(*pkts); /* Start processing. */ txq_complete(txq); max = (elts_n - (elts_head - txq->elts_tail)); if (max > elts_n) max -= elts_n; + max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); + if (unlikely(!max_wqe)) + return 0; do { - volatile struct mlx5_wqe_data_seg *dseg = NULL; + volatile rte_v128u32_t *dseg = NULL; uint32_t length; unsigned int ds = 0; + unsigned int sg = 0; /* counter of additional segs attached. 
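
mlx5_copy_to_wq(), introduced a little earlier, treats the Tx work queue as a circular buffer: when the bytes to write exceed the tailroom, the copy is split and the remainder lands back at the base of the queue. A standalone sketch of the same wrap-around copy on a small character ring:

#include <stdio.h>
#include <string.h>

/* Copy n bytes into a circular buffer, wrapping to its base once the
 * tailroom between dst and the end of the buffer is exhausted. */
static void *
ring_copy(void *dst, const void *src, size_t n, void *base, size_t tailroom)
{
	if (n > tailroom) {
		memcpy(dst, src, tailroom);
		memcpy(base, (const char *)src + tailroom, n - tailroom);
		return (char *)base + (n - tailroom);
	}
	memcpy(dst, src, n);
	return (n == tailroom) ? base : (char *)dst + n;
}

int
main(void)
{
	char ring[9] = "........"; /* 8-byte ring, NUL-terminated for %s */
	const char msg[] = "abcdef";
	void *next;

	/* Start 4 bytes before the end: "abcd" fits, "ef" wraps around. */
	next = ring_copy(ring + 4, msg, 6, ring, 4);
	printf("ring \"%s\", next write offset %td\n", ring,
	       (char *)next - ring);
	return 0;
}
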
*/ uintptr_t addr; + uint64_t naddr; + uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2; + uint16_t tso_header_sz = 0; + uint16_t ehdr; + uint8_t cs_flags = 0; + uint64_t tso = 0; #ifdef MLX5_PMD_SOFT_COUNTERS uint32_t total_length = 0; #endif /* first_seg */ - buf = *(pkts++); + buf = *pkts; segs_n = buf->nb_segs; /* * Make sure there is enough room to store this packet and @@ -419,104 +549,204 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) break; max -= segs_n; --segs_n; - if (!segs_n) - --pkts_n; - wqe = &(*txq->wqes)[txq->wqe_ci & - ((1 << txq->wqe_n) - 1)].hdr; - tx_prefetch_wqe(txq, txq->wqe_ci + 1); - if (pkts_n > 1) - rte_prefetch0(*pkts); + if (unlikely(--max_wqe == 0)) + break; + wqe = (volatile struct mlx5_wqe_v *) + tx_mlx5_wqe(txq, txq->wqe_ci); + rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); + if (pkts_n - i > 1) + rte_prefetch0(*(pkts + 1)); addr = rte_pktmbuf_mtod(buf, uintptr_t); length = DATA_LEN(buf); + ehdr = (((uint8_t *)addr)[1] << 8) | + ((uint8_t *)addr)[0]; #ifdef MLX5_PMD_SOFT_COUNTERS total_length = length; #endif - assert(length >= MLX5_WQE_DWORD_SIZE); + if (length < (MLX5_WQE_DWORD_SIZE + 2)) + break; /* Update element. */ (*txq->elts)[elts_head] = buf; - elts_head = (elts_head + 1) & (elts_n - 1); /* Prefetch next buffer data. */ - if (pkts_n > 1) { - volatile void *pkt_addr; - - pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *); - rte_prefetch0(pkt_addr); - } + if (pkts_n - i > 1) + rte_prefetch0( + rte_pktmbuf_mtod(*(pkts + 1), volatile void *)); /* Should we enable HW CKSUM offload */ if (buf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { - wqe->eseg.cs_flags = - MLX5_ETH_WQE_L3_CSUM | - MLX5_ETH_WQE_L4_CSUM; - } else { - wqe->eseg.cs_flags = 0; + const uint64_t is_tunneled = buf->ol_flags & + (PKT_TX_TUNNEL_GRE | + PKT_TX_TUNNEL_VXLAN); + + if (is_tunneled && txq->tunnel_en) { + cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM | + MLX5_ETH_WQE_L4_INNER_CSUM; + if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM) + cs_flags |= MLX5_ETH_WQE_L3_CSUM; + } else { + cs_flags = MLX5_ETH_WQE_L3_CSUM | + MLX5_ETH_WQE_L4_CSUM; + } } - raw = (uint8_t *)(uintptr_t)&wqe->eseg.inline_hdr[0]; - /* Start the know and common part of the WQE structure. */ - wqe->ctrl[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); - wqe->ctrl[2] = 0; - wqe->ctrl[3] = 0; - wqe->eseg.rsvd0 = 0; - wqe->eseg.rsvd1 = 0; - wqe->eseg.mss = 0; - wqe->eseg.rsvd2 = 0; - /* Start by copying the Ethernet Header. */ - memcpy((uint8_t *)raw, ((uint8_t *)addr), 16); - length -= MLX5_WQE_DWORD_SIZE; - addr += MLX5_WQE_DWORD_SIZE; + raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { uint32_t vlan = htonl(0x81000000 | buf->vlan_tci); - - memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE - - sizeof(vlan)), - &vlan, sizeof(vlan)); - addr -= sizeof(vlan); - length += sizeof(vlan); + unsigned int len = 2 * ETHER_ADDR_LEN - 2; + + addr += 2; + length -= 2; + /* Copy Destination and source mac address. */ + memcpy((uint8_t *)raw, ((uint8_t *)addr), len); + /* Copy VLAN. */ + memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan)); + /* Copy missing two bytes to end the DSeg. 
*/ + memcpy((uint8_t *)raw + len + sizeof(vlan), + ((uint8_t *)addr) + len, 2); + addr += len + 2; + length -= (len + 2); + } else { + memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2, + MLX5_WQE_DWORD_SIZE); + length -= pkt_inline_sz; + addr += pkt_inline_sz; + } + if (txq->tso_en) { + tso = buf->ol_flags & PKT_TX_TCP_SEG; + if (tso) { + uintptr_t end = (uintptr_t) + (((uintptr_t)txq->wqes) + + (1 << txq->wqe_n) * + MLX5_WQE_SIZE); + unsigned int copy_b; + uint8_t vlan_sz = (buf->ol_flags & + PKT_TX_VLAN_PKT) ? 4 : 0; + const uint64_t is_tunneled = + buf->ol_flags & + (PKT_TX_TUNNEL_GRE | + PKT_TX_TUNNEL_VXLAN); + + tso_header_sz = buf->l2_len + vlan_sz + + buf->l3_len + buf->l4_len; + + if (is_tunneled && txq->tunnel_en) { + tso_header_sz += buf->outer_l2_len + + buf->outer_l3_len; + cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; + } else { + cs_flags |= MLX5_ETH_WQE_L4_CSUM; + } + if (unlikely(tso_header_sz > + MLX5_MAX_TSO_HEADER)) + break; + copy_b = tso_header_sz - pkt_inline_sz; + /* First seg must contain all headers. */ + assert(copy_b <= length); + raw += MLX5_WQE_DWORD_SIZE; + if (copy_b && + ((end - (uintptr_t)raw) > copy_b)) { + uint16_t n = (MLX5_WQE_DS(copy_b) - + 1 + 3) / 4; + + if (unlikely(max_wqe < n)) + break; + max_wqe -= n; + rte_memcpy((void *)raw, + (void *)addr, copy_b); + addr += copy_b; + length -= copy_b; + pkt_inline_sz += copy_b; + /* + * Another DWORD will be added + * in the inline part. + */ + raw += MLX5_WQE_DS(copy_b) * + MLX5_WQE_DWORD_SIZE - + MLX5_WQE_DWORD_SIZE; + } else { + /* NOP WQE. */ + wqe->ctrl = (rte_v128u32_t){ + htonl(txq->wqe_ci << 8), + htonl(txq->qp_num_8s | 1), + 0, + 0, + }; + ds = 1; + total_length = 0; + k++; + goto next_wqe; + } + } } /* Inline if enough room. */ - if (txq->max_inline != 0) { - uintptr_t end = - (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n]; - uint16_t max_inline = - txq->max_inline * RTE_CACHE_LINE_SIZE; - uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE; - uint16_t room; + if (inline_en || tso) { + uintptr_t end = (uintptr_t) + (((uintptr_t)txq->wqes) + + (1 << txq->wqe_n) * MLX5_WQE_SIZE); + unsigned int inline_room = max_inline * + RTE_CACHE_LINE_SIZE - + (pkt_inline_sz - 2); + uintptr_t addr_end = (addr + inline_room) & + ~(RTE_CACHE_LINE_SIZE - 1); + unsigned int copy_b = (addr_end > addr) ? + RTE_MIN((addr_end - addr), length) : + 0; raw += MLX5_WQE_DWORD_SIZE; - room = end - (uintptr_t)raw; - if (room > max_inline) { - uintptr_t addr_end = (addr + max_inline) & - ~(RTE_CACHE_LINE_SIZE - 1); - uint16_t copy_b = ((addr_end - addr) > length) ? - length : - (addr_end - addr); - + if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { + /* + * One Dseg remains in the current WQE. To + * keep the computation positive, it is + * removed after the bytes to Dseg conversion. + */ + uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; + + if (unlikely(max_wqe < n)) + break; + max_wqe -= n; + if (tso) { + uint32_t inl = + htonl(copy_b | MLX5_INLINE_SEG); + + pkt_inline_sz = + MLX5_WQE_DS(tso_header_sz) * + MLX5_WQE_DWORD_SIZE; + rte_memcpy((void *)raw, + (void *)&inl, sizeof(inl)); + raw += sizeof(inl); + pkt_inline_sz += sizeof(inl); + } rte_memcpy((void *)raw, (void *)addr, copy_b); addr += copy_b; length -= copy_b; pkt_inline_sz += copy_b; - /* Sanity check. */ - assert(addr <= addr_end); } - /* Store the inlined packet size in the WQE. */ - wqe->eseg.inline_hdr_sz = htons(pkt_inline_sz); /* - * 2 DWORDs consumed by the WQE header + 1 DSEG + + * 2 DWORDs consumed by the WQE header + ETH segment + * the size of the inline part of the packet. 
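
WQE sizes in mlx5_tx_burst() are accounted in 16-byte units: MLX5_WQE_DS() rounds a byte count up to such units, and two extra units cover the control and Ethernet segments, which is how the descriptor count written into the control segment is derived from pkt_inline_sz. A quick standalone check of that arithmetic (constants copied from mlx5_prm.h):

#include <stdio.h>

#define MLX5_WQE_DWORD_SIZE 16
/* Round a byte count up to 16-byte units (MLX5_WQE_DS). */
#define MLX5_WQE_DS(n) \
	(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)

int
main(void)
{
	unsigned int pkt_inline_sz;

	for (pkt_inline_sz = 18; pkt_inline_sz <= 66; pkt_inline_sz += 16) {
		/* 2 units for the control and Ethernet segments, plus the
		 * inlined bytes minus the two already held in the eseg. */
		unsigned int ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);

		printf("inline %2u bytes -> %u units (%u WQE bytes)\n",
		       pkt_inline_sz, ds, ds * MLX5_WQE_DWORD_SIZE);
	}
	return 0;
}
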
*/ ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2); if (length > 0) { - dseg = (struct mlx5_wqe_data_seg *) - ((uintptr_t)wqe + - (ds * MLX5_WQE_DWORD_SIZE)); - if ((uintptr_t)dseg >= end) - dseg = (struct mlx5_wqe_data_seg *) - ((uintptr_t)&(*txq->wqes)[0]); + if (ds % (MLX5_WQE_SIZE / + MLX5_WQE_DWORD_SIZE) == 0) { + if (unlikely(--max_wqe == 0)) + break; + dseg = (volatile rte_v128u32_t *) + tx_mlx5_wqe(txq, txq->wqe_ci + + ds / 4); + } else { + dseg = (volatile rte_v128u32_t *) + ((uintptr_t)wqe + + (ds * MLX5_WQE_DWORD_SIZE)); + } goto use_dseg; } else if (!segs_n) { goto next_pkt; } else { + /* dseg will be advance as part of next_seg */ + dseg = (volatile rte_v128u32_t *) + ((uintptr_t)wqe + + ((ds - 1) * MLX5_WQE_DWORD_SIZE)); goto next_seg; } } else { @@ -524,16 +754,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) * No inline has been done in the packet, only the * Ethernet Header as been stored. */ - wqe->eseg.inline_hdr_sz = htons(MLX5_WQE_DWORD_SIZE); - dseg = (struct mlx5_wqe_data_seg *) + dseg = (volatile rte_v128u32_t *) ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE)); ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - *dseg = (struct mlx5_wqe_data_seg) { - .addr = htonll(addr), - .byte_count = htonl(length), - .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), + naddr = htonll(addr); + *dseg = (rte_v128u32_t){ + htonl(length), + txq_mp2mr(txq, txq_mb2mp(buf)), + naddr, + naddr >> 32, }; ++ds; if (!segs_n) @@ -550,12 +781,12 @@ next_seg: */ assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE)); if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) { - unsigned int n = (txq->wqe_ci + ((ds + 3) / 4)) & - ((1 << txq->wqe_n) - 1); - - dseg = (struct mlx5_wqe_data_seg *) - ((uintptr_t)&(*txq->wqes)[n]); - tx_prefetch_wqe(txq, n + 1); + if (unlikely(--max_wqe == 0)) + break; + dseg = (volatile rte_v128u32_t *) + tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4); + rte_prefetch0(tx_mlx5_wqe(txq, + txq->wqe_ci + ds / 4 + 1)); } else { ++dseg; } @@ -567,38 +798,73 @@ next_seg: total_length += length; #endif /* Store segment information. */ - *dseg = (struct mlx5_wqe_data_seg) { - .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)), - .byte_count = htonl(length), - .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), + naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + *dseg = (rte_v128u32_t){ + htonl(length), + txq_mp2mr(txq, txq_mb2mp(buf)), + naddr, + naddr >> 32, }; - (*txq->elts)[elts_head] = buf; elts_head = (elts_head + 1) & (elts_n - 1); - ++j; - --segs_n; - if (segs_n) + (*txq->elts)[elts_head] = buf; + ++sg; + /* Advance counter only if all segs are successfully posted. */ + if (sg < segs_n) goto next_seg; else - --pkts_n; + j += sg; next_pkt: + elts_head = (elts_head + 1) & (elts_n - 1); + ++pkts; ++i; - wqe->ctrl[1] = htonl(txq->qp_num_8s | ds); + /* Initialize known and common part of the WQE structure. 
*/ + if (tso) { + wqe->ctrl = (rte_v128u32_t){ + htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO), + htonl(txq->qp_num_8s | ds), + 0, + 0, + }; + wqe->eseg = (rte_v128u32_t){ + 0, + cs_flags | (htons(buf->tso_segsz) << 16), + 0, + (ehdr << 16) | htons(tso_header_sz), + }; + } else { + wqe->ctrl = (rte_v128u32_t){ + htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), + htonl(txq->qp_num_8s | ds), + 0, + 0, + }; + wqe->eseg = (rte_v128u32_t){ + 0, + cs_flags, + 0, + (ehdr << 16) | htons(pkt_inline_sz), + }; + } +next_wqe: txq->wqe_ci += (ds + 3) / 4; + /* Save the last successful WQE for completion request */ + last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe; #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment sent bytes counter. */ txq->stats.obytes += total_length; #endif - } while (pkts_n); + } while (i < pkts_n); /* Take a shortcut if nothing must be sent. */ - if (unlikely(i == 0)) + if (unlikely((i + k) == 0)) return 0; + txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1); /* Check whether completion threshold has been reached. */ - comp = txq->elts_comp + i + j; + comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + last_wqe->ctrl2 = htonl(8); /* Save elts_head in unused "immediate" field of WQE. */ - wqe->ctrl[3] = elts_head; + last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; } else { txq->elts_comp = comp; @@ -608,8 +874,7 @@ next_pkt: txq->stats.opackets += i; #endif /* Ring QP doorbell. */ - mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe); - txq->elts_head = elts_head; + mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe); return i; } @@ -629,13 +894,13 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1); volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] = (volatile struct mlx5_wqe_data_seg (*)[]) - (uintptr_t)&(*txq->wqes)[(idx + 1) & ((1 << txq->wqe_n) - 1)]; + tx_mlx5_wqe(txq, idx + 1); mpw->state = MLX5_MPW_STATE_OPENED; mpw->pkts_n = 0; mpw->len = length; mpw->total_len = 0; - mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr; + mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); mpw->wqe->eseg.mss = htons(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.rsvd0 = 0; @@ -677,8 +942,8 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw) ++txq->wqe_ci; else txq->wqe_ci += 2; - tx_prefetch_wqe(txq, txq->wqe_ci); - tx_prefetch_wqe(txq, txq->wqe_ci + 1); + rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); + rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); } /** @@ -703,6 +968,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int i = 0; unsigned int j = 0; unsigned int max; + uint16_t max_wqe; unsigned int comp; struct mlx5_mpw mpw = { .state = MLX5_MPW_STATE_CLOSED, @@ -711,14 +977,16 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!pkts_n)) return 0; /* Prefetch first packet cacheline. */ - tx_prefetch_cqe(txq, txq->cq_ci); - tx_prefetch_wqe(txq, txq->wqe_ci); - tx_prefetch_wqe(txq, txq->wqe_ci + 1); + rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); + rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); /* Start processing. 
*/ txq_complete(txq); max = (elts_n - (elts_head - txq->elts_tail)); if (max > elts_n) max -= elts_n; + max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); + if (unlikely(!max_wqe)) + return 0; do { struct rte_mbuf *buf = *(pkts++); unsigned int elts_head_next; @@ -752,6 +1020,14 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) (mpw.wqe->eseg.cs_flags != cs_flags))) mlx5_mpw_close(txq, &mpw); if (mpw.state == MLX5_MPW_STATE_CLOSED) { + /* + * Multi-Packet WQE consumes at most two WQE. + * mlx5_mpw_new() expects to be able to use such + * resources. + */ + if (unlikely(max_wqe < 2)) + break; + max_wqe -= 2; mlx5_mpw_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; } @@ -841,7 +1117,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) mpw->pkts_n = 0; mpw->len = length; mpw->total_len = 0; - mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr; + mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | (txq->wqe_ci << 8) | MLX5_OPCODE_TSO); @@ -907,18 +1183,30 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, unsigned int i = 0; unsigned int j = 0; unsigned int max; + uint16_t max_wqe; unsigned int comp; unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE; struct mlx5_mpw mpw = { .state = MLX5_MPW_STATE_CLOSED, }; + /* + * Compute the maximum number of WQE which can be consumed by inline + * code. + * - 2 DSEG for: + * - 1 control segment, + * - 1 Ethernet segment, + * - N Dseg from the inline request. + */ + const unsigned int wqe_inl_n = + ((2 * MLX5_WQE_DWORD_SIZE + + txq->max_inline * RTE_CACHE_LINE_SIZE) + + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE; if (unlikely(!pkts_n)) return 0; /* Prefetch first packet cacheline. */ - tx_prefetch_cqe(txq, txq->cq_ci); - tx_prefetch_wqe(txq, txq->wqe_ci); - tx_prefetch_wqe(txq, txq->wqe_ci + 1); + rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci)); + rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1)); /* Start processing. */ txq_complete(txq); max = (elts_n - (elts_head - txq->elts_tail)); @@ -944,6 +1232,11 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, break; max -= segs_n; --pkts_n; + /* + * Compute max_wqe in case less WQE were consumed in previous + * iteration. + */ + max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); /* Should we enable HW CKSUM offload */ if (buf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) @@ -969,9 +1262,20 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, if (mpw.state == MLX5_MPW_STATE_CLOSED) { if ((segs_n != 1) || (length > inline_room)) { + /* + * Multi-Packet WQE consumes at most two WQE. + * mlx5_mpw_new() expects to be able to use + * such resources. + */ + if (unlikely(max_wqe < 2)) + break; + max_wqe -= 2; mlx5_mpw_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; } else { + if (unlikely(max_wqe < wqe_inl_n)) + break; + max_wqe -= wqe_inl_n; mlx5_mpw_inline_new(txq, &mpw, length); mpw.wqe->eseg.cs_flags = cs_flags; } @@ -1019,14 +1323,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, addr = rte_pktmbuf_mtod(buf, uintptr_t); (*txq->elts)[elts_head] = buf; /* Maximum number of bytes before wrapping. 
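
The burst routines above derive the number of free WQEs from two free-running 16-bit counters: max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi), where unsigned wrap-around keeps the subtraction correct even after the indices overflow. A small standalone check of that idiom:

#include <stdint.h>
#include <stdio.h>

/* Free slots in a ring of (1 << wqe_n) entries tracked by free-running
 * 16-bit counters of posted (wqe_ci) and completed (wqe_pi) WQEs. */
static unsigned int
ring_free_slots(uint16_t wqe_ci, uint16_t wqe_pi, unsigned int wqe_n)
{
	return (1u << wqe_n) - (uint16_t)(wqe_ci - wqe_pi);
}

int
main(void)
{
	/* 256-entry ring whose indices already wrapped past 65535. */
	printf("%u\n", ring_free_slots(10, 65530, 8)); /* 256 - 16 = 240 */
	printf("%u\n", ring_free_slots(100, 100, 8));  /* all free: 256 */
	return 0;
}
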
*/ - max = ((uintptr_t)&(*txq->wqes)[1 << txq->wqe_n] - + max = ((((uintptr_t)(txq->wqes)) + + (1 << txq->wqe_n) * + MLX5_WQE_SIZE) - (uintptr_t)mpw.data.raw); if (length > max) { rte_memcpy((void *)(uintptr_t)mpw.data.raw, (void *)addr, max); - mpw.data.raw = - (volatile void *)&(*txq->wqes)[0]; + mpw.data.raw = (volatile void *)txq->wqes; rte_memcpy((void *)(uintptr_t)mpw.data.raw, (void *)(addr + max), length - max); @@ -1035,12 +1340,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, rte_memcpy((void *)(uintptr_t)mpw.data.raw, (void *)addr, length); - mpw.data.raw += length; + + if (length == max) + mpw.data.raw = + (volatile void *)txq->wqes; + else + mpw.data.raw += length; } - if ((uintptr_t)mpw.data.raw == - (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n]) - mpw.data.raw = - (volatile void *)&(*txq->wqes)[0]; ++mpw.pkts_n; mpw.total_len += length; ++j; @@ -1091,6 +1397,360 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, } /** + * Open an Enhanced MPW session. + * + * @param txq + * Pointer to TX queue structure. + * @param mpw + * Pointer to MPW session structure. + * @param length + * Packet length. + */ +static inline void +mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding) +{ + uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1); + + mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED; + mpw->pkts_n = 0; + mpw->total_len = sizeof(struct mlx5_wqe); + mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); + mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_ENHANCED_MPSW); + mpw->wqe->ctrl[2] = 0; + mpw->wqe->ctrl[3] = 0; + memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE); + if (unlikely(padding)) { + uintptr_t addr = (uintptr_t)(mpw->wqe + 1); + + /* Pad the first 2 DWORDs with zero-length inline header. */ + *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG); + *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) = + htonl(MLX5_INLINE_SEG); + mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE; + /* Start from the next WQEBB. */ + mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1)); + } else { + mpw->data.raw = (volatile void *)(mpw->wqe + 1); + } +} + +/** + * Close an Enhanced MPW session. + * + * @param txq + * Pointer to TX queue structure. + * @param mpw + * Pointer to MPW session structure. + * + * @return + * Number of consumed WQEs. + */ +static inline uint16_t +mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw) +{ + uint16_t ret; + + /* Store size in multiple of 16 bytes. Control and Ethernet segments + * count as 2. + */ + mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len)); + mpw->state = MLX5_MPW_STATE_CLOSED; + ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; + txq->wqe_ci += ret; + return ret; +} + +/** + * DPDK callback for TX with Enhanced MPW support. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). 
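
mlx5_empw_close() above stores the session size in 16-byte units in ctrl[1] and returns the number of whole WQEBBs (64-byte blocks) the session consumed, both obtained by rounding total_len up. A standalone check of those two roundings, with the constants taken from mlx5_prm.h:

#include <stdio.h>

#define MLX5_WQE_SIZE 64        /* one WQEBB */
#define MLX5_WQE_DWORD_SIZE 16

int
main(void)
{
	unsigned int total_len;

	for (total_len = 64; total_len <= 208; total_len += 48) {
		/* Size in 16-byte units, as stored in ctrl[1]. */
		unsigned int ds = (total_len + MLX5_WQE_DWORD_SIZE - 1) /
				  MLX5_WQE_DWORD_SIZE;
		/* Whole WQEBBs consumed, as returned to the caller. */
		unsigned int wqebbs = (total_len + (MLX5_WQE_SIZE - 1)) /
				      MLX5_WQE_SIZE;

		printf("session %3u bytes -> ds %2u, wqebbs %u\n",
		       total_len, ds, wqebbs);
	}
	return 0;
}
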
+ */ +uint16_t +mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct txq *txq = (struct txq *)dpdk_txq; + uint16_t elts_head = txq->elts_head; + const unsigned int elts_n = 1 << txq->elts_n; + unsigned int i = 0; + unsigned int j = 0; + unsigned int max_elts; + uint16_t max_wqe; + unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE; + unsigned int mpw_room = 0; + unsigned int inl_pad = 0; + uint32_t inl_hdr; + struct mlx5_mpw mpw = { + .state = MLX5_MPW_STATE_CLOSED, + }; + + if (unlikely(!pkts_n)) + return 0; + /* Start processing. */ + txq_complete(txq); + max_elts = (elts_n - (elts_head - txq->elts_tail)); + if (max_elts > elts_n) + max_elts -= elts_n; + /* A CQE slot must always be available. */ + assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci)); + max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); + if (unlikely(!max_wqe)) + return 0; + do { + struct rte_mbuf *buf = *(pkts++); + unsigned int elts_head_next; + uintptr_t addr; + uint64_t naddr; + unsigned int n; + unsigned int do_inline = 0; /* Whether inline is possible. */ + uint32_t length; + unsigned int segs_n = buf->nb_segs; + uint32_t cs_flags = 0; + + /* + * Make sure there is enough room to store this packet and + * that one ring entry remains unused. + */ + assert(segs_n); + if (max_elts - j < segs_n + 1) + break; + /* Do not bother with large packets MPW cannot handle. */ + if (segs_n > MLX5_MPW_DSEG_MAX) + break; + /* Should we enable HW CKSUM offload. */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) + cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; + /* Retrieve packet information. */ + length = PKT_LEN(buf); + /* Start new session if: + * - multi-segment packet + * - no space left even for a dseg + * - next packet can be inlined with a new WQE + * - cs_flag differs + * It can't be MLX5_MPW_STATE_OPENED as always have a single + * segmented packet. + */ + if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) { + if ((segs_n != 1) || + (inl_pad + sizeof(struct mlx5_wqe_data_seg) > + mpw_room) || + (length <= txq->inline_max_packet_sz && + inl_pad + sizeof(inl_hdr) + length > + mpw_room) || + (mpw.wqe->eseg.cs_flags != cs_flags)) + max_wqe -= mlx5_empw_close(txq, &mpw); + } + if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) { + if (unlikely(segs_n != 1)) { + /* Fall back to legacy MPW. + * A MPW session consumes 2 WQEs at most to + * include MLX5_MPW_DSEG_MAX pointers. + */ + if (unlikely(max_wqe < 2)) + break; + mlx5_mpw_new(txq, &mpw, length); + } else { + /* In Enhanced MPW, inline as much as the budget + * is allowed. The remaining space is to be + * filled with dsegs. If the title WQEBB isn't + * padded, it will have 2 dsegs there. + */ + mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX, + (max_inline ? max_inline : + pkts_n * MLX5_WQE_DWORD_SIZE) + + MLX5_WQE_SIZE); + if (unlikely(max_wqe * MLX5_WQE_SIZE < + mpw_room)) + break; + /* Don't pad the title WQEBB to not waste WQ. */ + mlx5_empw_new(txq, &mpw, 0); + mpw_room -= mpw.total_len; + inl_pad = 0; + do_inline = + length <= txq->inline_max_packet_sz && + sizeof(inl_hdr) + length <= mpw_room && + !txq->mpw_hdr_dseg; + } + mpw.wqe->eseg.cs_flags = cs_flags; + } else { + /* Evaluate whether the next packet can be inlined. 
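
For a single-segment packet, the session opener above sizes mpw_room from the inline budget when inlining is configured, or from one data segment per packet still queued otherwise, always adding the title WQEBB and capping at the maximum WQE size. A standalone sketch of that sizing, with assumed values for the MLX5_WQE_* constants:

#include <stdint.h>
#include <stdio.h>

#define WQE_SIZE        64u     /* assumed MLX5_WQE_SIZE */
#define WQE_DWORD_SIZE  16u     /* assumed MLX5_WQE_DWORD_SIZE */
#define WQE_SIZE_MAX   960u     /* assumed MLX5_WQE_SIZE_MAX */

static unsigned int
empw_session_budget(unsigned int max_inline, unsigned int pkts_left)
{
        /* Inline budget if configured, otherwise room for one dseg per
         * remaining packet, plus the title WQEBB. */
        unsigned int want = (max_inline ? max_inline :
                             pkts_left * WQE_DWORD_SIZE) + WQE_SIZE;

        return want < WQE_SIZE_MAX ? want : WQE_SIZE_MAX;
}

int
main(void)
{
        printf("%u %u\n", empw_session_budget(256, 8),
               empw_session_budget(0, 8));
        return 0;
}
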
+ * Inlininig is possible when: + * - length is less than configured value + * - length fits for remaining space + * - not required to fill the title WQEBB with dsegs + */ + do_inline = + length <= txq->inline_max_packet_sz && + inl_pad + sizeof(inl_hdr) + length <= + mpw_room && + (!txq->mpw_hdr_dseg || + mpw.total_len >= MLX5_WQE_SIZE); + } + /* Multi-segment packets must be alone in their MPW. */ + assert((segs_n == 1) || (mpw.pkts_n == 0)); + if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) { +#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) + length = 0; +#endif + do { + volatile struct mlx5_wqe_data_seg *dseg; + + elts_head_next = + (elts_head + 1) & (elts_n - 1); + assert(buf); + (*txq->elts)[elts_head] = buf; + dseg = mpw.data.dseg[mpw.pkts_n]; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + *dseg = (struct mlx5_wqe_data_seg){ + .byte_count = htonl(DATA_LEN(buf)), + .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), + .addr = htonll(addr), + }; + elts_head = elts_head_next; +#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) + length += DATA_LEN(buf); +#endif + buf = buf->next; + ++j; + ++mpw.pkts_n; + } while (--segs_n); + /* A multi-segmented packet takes one MPW session. + * TODO: Pack more multi-segmented packets if possible. + */ + mlx5_mpw_close(txq, &mpw); + if (mpw.pkts_n < 3) + max_wqe--; + else + max_wqe -= 2; + } else if (do_inline) { + /* Inline packet into WQE. */ + unsigned int max; + + assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); + assert(length == DATA_LEN(buf)); + inl_hdr = htonl(length | MLX5_INLINE_SEG); + addr = rte_pktmbuf_mtod(buf, uintptr_t); + mpw.data.raw = (volatile void *) + ((uintptr_t)mpw.data.raw + inl_pad); + max = tx_mlx5_wq_tailroom(txq, + (void *)(uintptr_t)mpw.data.raw); + /* Copy inline header. */ + mpw.data.raw = (volatile void *) + mlx5_copy_to_wq( + (void *)(uintptr_t)mpw.data.raw, + &inl_hdr, + sizeof(inl_hdr), + (void *)(uintptr_t)txq->wqes, + max); + max = tx_mlx5_wq_tailroom(txq, + (void *)(uintptr_t)mpw.data.raw); + /* Copy packet data. */ + mpw.data.raw = (volatile void *) + mlx5_copy_to_wq( + (void *)(uintptr_t)mpw.data.raw, + (void *)addr, + length, + (void *)(uintptr_t)txq->wqes, + max); + ++mpw.pkts_n; + mpw.total_len += (inl_pad + sizeof(inl_hdr) + length); + /* No need to get completion as the entire packet is + * copied to WQ. Free the buf right away. + */ + elts_head_next = elts_head; + rte_pktmbuf_free_seg(buf); + mpw_room -= (inl_pad + sizeof(inl_hdr) + length); + /* Add pad in the next packet if any. */ + inl_pad = (((uintptr_t)mpw.data.raw + + (MLX5_WQE_DWORD_SIZE - 1)) & + ~(MLX5_WQE_DWORD_SIZE - 1)) - + (uintptr_t)mpw.data.raw; + } else { + /* No inline. Load a dseg of packet pointer. 
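
After each packet inlined by the branch above, the write pointer is rounded up to the next 16-byte boundary and the gap is carried forward as inl_pad, so the following data segment or inline header stays dword-aligned. The mask expression used in the code is the usual align-up idiom; a standalone sketch:

#include <assert.h>
#include <stdint.h>

#define DWORD 16u       /* assumed MLX5_WQE_DWORD_SIZE */

/* Bytes of padding needed to bring 'pos' up to the next 16-byte boundary. */
static uintptr_t
dword_pad(uintptr_t pos)
{
        return ((pos + (DWORD - 1)) & ~(uintptr_t)(DWORD - 1)) - pos;
}

int
main(void)
{
        assert(dword_pad(0x1000) == 0);
        assert(dword_pad(0x1001) == 15);
        assert(dword_pad(0x100a) == 6);
        return 0;
}
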
*/ + volatile rte_v128u32_t *dseg; + + assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); + assert((inl_pad + sizeof(*dseg)) <= mpw_room); + assert(length == DATA_LEN(buf)); + if (!tx_mlx5_wq_tailroom(txq, + (void *)((uintptr_t)mpw.data.raw + + inl_pad))) + dseg = (volatile void *)txq->wqes; + else + dseg = (volatile void *) + ((uintptr_t)mpw.data.raw + + inl_pad); + elts_head_next = (elts_head + 1) & (elts_n - 1); + (*txq->elts)[elts_head] = buf; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) + rte_prefetch2((void *)(addr + + n * RTE_CACHE_LINE_SIZE)); + naddr = htonll(addr); + *dseg = (rte_v128u32_t) { + htonl(length), + txq_mp2mr(txq, txq_mb2mp(buf)), + naddr, + naddr >> 32, + }; + mpw.data.raw = (volatile void *)(dseg + 1); + mpw.total_len += (inl_pad + sizeof(*dseg)); + ++j; + ++mpw.pkts_n; + mpw_room -= (inl_pad + sizeof(*dseg)); + inl_pad = 0; + } + elts_head = elts_head_next; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent bytes counter. */ + txq->stats.obytes += length; +#endif + ++i; + } while (i < pkts_n); + /* Take a shortcut if nothing must be sent. */ + if (unlikely(i == 0)) + return 0; + /* Check whether completion threshold has been reached. */ + if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH || + (uint16_t)(txq->wqe_ci - txq->mpw_comp) >= + (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) { + volatile struct mlx5_wqe *wqe = mpw.wqe; + + /* Request completion on last WQE. */ + wqe->ctrl[2] = htonl(8); + /* Save elts_head in unused "immediate" field of WQE. */ + wqe->ctrl[3] = elts_head; + txq->elts_comp = 0; + txq->mpw_comp = txq->wqe_ci; + txq->cq_pi++; + } else { + txq->elts_comp += j; + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent packets counter. */ + txq->stats.opackets += i; +#endif + if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) + mlx5_empw_close(txq, &mpw); + else if (mpw.state == MLX5_MPW_STATE_OPENED) + mlx5_mpw_close(txq, &mpw); + /* Ring QP doorbell. */ + mlx5_tx_dbrec(txq, mpw.wqe); + txq->elts_head = elts_head; + return i; +} + +/** * Translate RX completion flags to packet type. * * @param[in] cqe @@ -1153,6 +1813,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, struct rxq_zip *zip = &rxq->zip; uint16_t cqe_n = cqe_cnt + 1; int len = 0; + uint16_t idx, end; /* Process compressed data in the CQE and mini arrays. */ if (zip->ai) { @@ -1163,6 +1824,14 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, len = ntohl((*mc)[zip->ai & 7].byte_cnt); *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result); if ((++zip->ai & 7) == 0) { + /* Invalidate consumed CQEs */ + idx = zip->ca; + end = zip->na; + while (idx != end) { + (*rxq->cqes)[idx & cqe_cnt].op_own = + MLX5_CQE_INVALIDATE; + ++idx; + } /* * Increment consumer index to skip the number of * CQEs consumed. Hardware leaves holes in the CQ @@ -1172,8 +1841,9 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, zip->na += 8; } if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) { - uint16_t idx = rxq->cq_ci; - uint16_t end = zip->cq_ci; + /* Invalidate the rest */ + idx = zip->ca; + end = zip->cq_ci; while (idx != end) { (*rxq->cqes)[idx & cqe_cnt].op_own = @@ -1209,7 +1879,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, * special case the second one is located 7 CQEs after * the initial CQE instead of 8 for subsequent ones. */ - zip->ca = rxq->cq_ci & cqe_cnt; + zip->ca = rxq->cq_ci; zip->na = zip->ca + 7; /* Compute the next non compressed CQE. 
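
The Enhanced MPW burst above asks for a completion only occasionally: when MLX5_TX_COMP_THRESH more mbufs have been queued since the last request, or when wqe_ci has run far enough ahead of mpw_comp that only a fraction of the ring remains. A sketch of that trigger with assumed threshold values (the real ones live in mlx5_defs.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COMP_THRESH            32u  /* assumed MLX5_TX_COMP_THRESH */
#define COMP_THRESH_INLINE_DIV  8u  /* assumed MLX5_TX_COMP_THRESH_INLINE_DIV */

static bool
need_completion(uint16_t elts_comp, unsigned int newly_queued,
                uint16_t wqe_ci, uint16_t mpw_comp, unsigned int wqe_n_log)
{
        if (elts_comp + newly_queued >= COMP_THRESH)
                return true;
        /* Wrap-safe distance since the last WQE that asked for a CQE. */
        return (uint16_t)(wqe_ci - mpw_comp) >=
               (1u << wqe_n_log) / COMP_THRESH_INLINE_DIV;
}

int
main(void)
{
        printf("%d\n", need_completion(30, 4, 100, 90, 9));
        return 0;
}
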
*/ --rxq->cq_ci; @@ -1218,6 +1888,13 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe, len = ntohl((*mc)[0].byte_cnt); *rss_hash = ntohl((*mc)[0].rx_hash_result); zip->ai = 1; + /* Prefetch all the entries to be invalidated */ + idx = zip->ca; + end = zip->cq_ci; + while (idx != end) { + rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]); + ++idx; + } } else { len = ntohl(cqe->byte_cnt); *rss_hash = ntohl(cqe->rx_hash_res); @@ -1290,7 +1967,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; unsigned int i = 0; unsigned int rq_ci = rxq->rq_ci << sges_n; - int len; /* keep its value across iterations. */ + int len = 0; /* keep its value across iterations. */ while (pkts_n) { unsigned int idx = rq_ci & wqe_cnt; @@ -1317,8 +1994,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) while (pkt != seg) { assert(pkt != (*rxq->elts)[idx]); rep = NEXT(pkt); - rte_mbuf_refcnt_set(pkt, 0); - __rte_mbuf_raw_free(pkt); + NEXT(pkt) = NULL; + NB_SEGS(pkt) = 1; + rte_mbuf_raw_free(pkt); pkt = rep; } break; @@ -1328,14 +2006,12 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &rss_hash_res); if (!len) { - rte_mbuf_refcnt_set(rep, 0); - __rte_mbuf_raw_free(rep); + rte_mbuf_raw_free(rep); break; } if (unlikely(len == -1)) { /* RX error, packet is likely too large. */ - rte_mbuf_refcnt_set(rep, 0); - __rte_mbuf_raw_free(rep); + rte_mbuf_raw_free(rep); ++rxq->stats.idropped; goto skip; } @@ -1348,23 +2024,31 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) pkt->hash.rss = rss_hash_res; pkt->ol_flags = PKT_RX_RSS_HASH; } - if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip | - rxq->crc_present) { - if (rxq->csum) { - pkt->packet_type = - rxq_cq_to_pkt_type(cqe); - pkt->ol_flags |= - rxq_cq_to_ol_flags(rxq, cqe); - } - if (cqe->hdr_type_etc & - MLX5_CQE_VLAN_STRIPPED) { - pkt->ol_flags |= PKT_RX_VLAN_PKT | - PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = ntohs(cqe->vlan_info); + if (rxq->mark && + MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { + pkt->ol_flags |= PKT_RX_FDIR; + if (cqe->sop_drop_qpn != + htonl(MLX5_FLOW_MARK_DEFAULT)) { + uint32_t mark = cqe->sop_drop_qpn; + + pkt->ol_flags |= PKT_RX_FDIR_ID; + pkt->hash.fdir.hi = + mlx5_flow_mark_get(mark); } - if (rxq->crc_present) - len -= ETHER_CRC_LEN; } + if (rxq->csum | rxq->csum_l2tun) { + pkt->packet_type = rxq_cq_to_pkt_type(cqe); + pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); + } + if (rxq->vlan_strip && + (cqe->hdr_type_etc & + htons(MLX5_CQE_VLAN_STRIPPED))) { + pkt->ol_flags |= PKT_RX_VLAN_PKT | + PKT_RX_VLAN_STRIPPED; + pkt->vlan_tci = ntohs(cqe->vlan_info); + } + if (rxq->crc_present) + len -= ETHER_CRC_LEN; PKT_LEN(pkt) = len; } DATA_LEN(rep) = DATA_LEN(seg); @@ -1466,3 +2150,76 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) (void)pkts_n; return 0; } + +/** + * DPDK callback for rx queue interrupt enable. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * RX queue number + * + * @return + * 0 on success, negative on failure. 
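
The mark handling added to mlx5_rx_burst() above is what makes rte_flow MARK/FLAG actions visible to applications: a flagged packet carries PKT_RX_FDIR, and a specific mark value additionally sets PKT_RX_FDIR_ID with the value delivered in hash.fdir.hi. A hedged sketch of the application-side check, using only standard mbuf fields:

#include <inttypes.h>
#include <stdio.h>
#include <rte_mbuf.h>

/* Inside an application RX loop; 'pkt' was returned by rte_eth_rx_burst(). */
static void
inspect_mark(const struct rte_mbuf *pkt)
{
        if (pkt->ol_flags & PKT_RX_FDIR) {
                if (pkt->ol_flags & PKT_RX_FDIR_ID)
                        printf("matched flow, mark=%" PRIu32 "\n",
                               pkt->hash.fdir.hi);
                else
                        printf("matched flow (flag only)\n");
        }
}
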
+ */ +int +mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#ifdef HAVE_UPDATE_CQ_CI + struct priv *priv = mlx5_get_priv(dev); + struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; + struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); + struct ibv_cq *cq = rxq_ctrl->cq; + uint16_t ci = rxq->cq_ci; + int ret = 0; + + ibv_mlx5_exp_update_cq_ci(cq, ci); + ret = ibv_req_notify_cq(cq, 0); +#else + int ret = -1; + (void)dev; + (void)rx_queue_id; +#endif + if (ret) + WARN("unable to arm interrupt on rx queue %d", rx_queue_id); + return ret; +} + +/** + * DPDK callback for rx queue interrupt disable. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * RX queue number + * + * @return + * 0 on success, negative on failure. + */ +int +mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#ifdef HAVE_UPDATE_CQ_CI + struct priv *priv = mlx5_get_priv(dev); + struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; + struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); + struct ibv_cq *cq = rxq_ctrl->cq; + struct ibv_cq *ev_cq; + void *ev_ctx; + int ret = 0; + + ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx); + if (ret || ev_cq != cq) + ret = -1; + else + ibv_ack_cq_events(cq, 1); +#else + int ret = -1; + (void)dev; + (void)rx_queue_id; +#endif + if (ret) + WARN("unable to disable interrupt on rx queue %d", + rx_queue_id); + return ret; +} diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 909d80e6..8db8eb14 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -114,7 +114,8 @@ struct rxq { unsigned int elts_n:4; /* Log 2 of Mbufs. */ unsigned int port_id:8; unsigned int rss_hash:1; /* RSS hash result is enabled. */ - unsigned int :9; /* Remaining bits. */ + unsigned int mark:1; /* Marked flow available on the queue. */ + unsigned int :8; /* Remaining bits. */ volatile uint32_t *rq_db; volatile uint32_t *cq_db; uint16_t rq_ci; @@ -132,11 +133,9 @@ struct rxq_ctrl { struct priv *priv; /* Back pointer to private data. */ struct ibv_cq *cq; /* Completion Queue. */ struct ibv_exp_wq *wq; /* Work Queue. */ - struct ibv_exp_res_domain *rd; /* Resource Domain. */ struct fdir_queue *fdir_queue; /* Flow director queue. */ struct ibv_mr *mr; /* Memory Region (for mp). */ - struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */ - struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */ + struct ibv_comp_channel *channel; unsigned int socket; /* CPU socket ID for allocations. */ struct rxq rxq; /* Data path structure. */ }; @@ -246,15 +245,24 @@ struct txq { uint16_t elts_head; /* Current index in (*elts)[]. */ uint16_t elts_tail; /* First element awaiting completion. */ uint16_t elts_comp; /* Counter since last completion request. */ + uint16_t mpw_comp; /* WQ index since last completion request. */ uint16_t cq_ci; /* Consumer index for completion queue. */ + uint16_t cq_pi; /* Producer index for completion queue. */ uint16_t wqe_ci; /* Consumer index for work queue. */ + uint16_t wqe_pi; /* Producer index for work queue. */ uint16_t elts_n:4; /* (*elts)[] length (in log2). */ uint16_t cqe_n:4; /* Number of CQ elements (in log2). */ uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */ + uint16_t inline_en:1; /* When set inline is enabled. */ + uint16_t tso_en:1; /* When set hardware TSO is enabled. */ + uint16_t tunnel_en:1; + /* When set TX offload for tunneled packets are supported. */ + uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. 
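
The two callbacks above plug into the generic RX interrupt API: an application enables dev_conf.intr_conf.rxq at configure time, arms the queue before blocking, and disarms it when it resumes polling. The sketch below shows that pattern with placeholder port/queue ids, error handling and the epoll plumbing omitted:

#include <rte_ethdev.h>

/* 'port_id' and 'queue_id' refer to an already configured and started port. */
static void
wait_for_traffic(uint8_t port_id, uint16_t queue_id)
{
        /* Arm the interrupt: maps to mlx5_rx_intr_enable(). */
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        /* ... block on the interrupt event (e.g. via rte_epoll_wait) ... */
        /* Ack and disarm before polling again: maps to mlx5_rx_intr_disable(). */
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
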
*/ uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */ + uint16_t inline_max_packet_sz; /* Max packet size for inlining. */ uint32_t qp_num_8s; /* QP number shifted by 8. */ volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */ - volatile struct mlx5_wqe64 (*wqes)[]; /* Work queue. */ + volatile void *wqes; /* Work queue (use volatile to write into). */ volatile uint32_t *qp_db; /* Work queue doorbell. */ volatile uint32_t *cq_db; /* Completion queue doorbell. */ volatile void *bf_reg; /* Blueflame register. */ @@ -272,9 +280,6 @@ struct txq_ctrl { struct priv *priv; /* Back pointer to private data. */ struct ibv_cq *cq; /* Completion Queue. */ struct ibv_qp *qp; /* Queue Pair. */ - struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */ - struct ibv_exp_cq_family *if_cq; /* CQ interface. */ - struct ibv_exp_res_domain *rd; /* Resource Domain. */ unsigned int socket; /* CPU socket ID for allocations. */ struct txq txq; /* Data path structure. */ }; @@ -293,6 +298,10 @@ int priv_create_hash_rxqs(struct priv *); void priv_destroy_hash_rxqs(struct priv *); int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type); int priv_rehash_flows(struct priv *); +int priv_intr_efd_enable(struct priv *priv); +void priv_intr_efd_disable(struct priv *priv); +int priv_create_intr_vec(struct priv *priv); +void priv_destroy_intr_vec(struct priv *priv); void rxq_cleanup(struct rxq_ctrl *); int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *); int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t, @@ -318,9 +327,14 @@ uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); +int mlx5_rx_descriptor_status(void *, uint16_t); +int mlx5_tx_descriptor_status(void *, uint16_t); +int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); /* mlx5_mr.c */ diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index f2b5781a..703f48c3 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -31,11 +31,16 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include <linux/sockios.h> +#include <linux/ethtool.h> + /* DPDK headers don't like -pedantic. */ #ifdef PEDANTIC #pragma GCC diagnostic ignored "-Wpedantic" #endif #include <rte_ethdev.h> +#include <rte_common.h> +#include <rte_malloc.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-Wpedantic" #endif @@ -44,6 +49,274 @@ #include "mlx5_rxtx.h" #include "mlx5_defs.h" +struct mlx5_counter_ctrl { + /* Name of the counter. */ + char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE]; + /* Name of the counter on the device table. 
*/ + char ctr_name[RTE_ETH_XSTATS_NAME_SIZE]; +}; + +static const struct mlx5_counter_ctrl mlx5_counters_init[] = { + { + .dpdk_name = "rx_port_unicast_bytes", + .ctr_name = "rx_vport_unicast_bytes", + }, + { + .dpdk_name = "rx_port_multicast_bytes", + .ctr_name = "rx_vport_multicast_bytes", + }, + { + .dpdk_name = "rx_port_broadcast_bytes", + .ctr_name = "rx_vport_broadcast_bytes", + }, + { + .dpdk_name = "rx_port_unicast_packets", + .ctr_name = "rx_vport_unicast_packets", + }, + { + .dpdk_name = "rx_port_multicast_packets", + .ctr_name = "rx_vport_multicast_packets", + }, + { + .dpdk_name = "rx_port_broadcast_packets", + .ctr_name = "rx_vport_broadcast_packets", + }, + { + .dpdk_name = "tx_port_unicast_bytes", + .ctr_name = "tx_vport_unicast_bytes", + }, + { + .dpdk_name = "tx_port_multicast_bytes", + .ctr_name = "tx_vport_multicast_bytes", + }, + { + .dpdk_name = "tx_port_broadcast_bytes", + .ctr_name = "tx_vport_broadcast_bytes", + }, + { + .dpdk_name = "tx_port_unicast_packets", + .ctr_name = "tx_vport_unicast_packets", + }, + { + .dpdk_name = "tx_port_multicast_packets", + .ctr_name = "tx_vport_multicast_packets", + }, + { + .dpdk_name = "tx_port_broadcast_packets", + .ctr_name = "tx_vport_broadcast_packets", + }, + { + .dpdk_name = "rx_wqe_err", + .ctr_name = "rx_wqe_err", + }, + { + .dpdk_name = "rx_crc_errors_phy", + .ctr_name = "rx_crc_errors_phy", + }, + { + .dpdk_name = "rx_in_range_len_errors_phy", + .ctr_name = "rx_in_range_len_errors_phy", + }, + { + .dpdk_name = "rx_symbol_err_phy", + .ctr_name = "rx_symbol_err_phy", + }, + { + .dpdk_name = "tx_errors_phy", + .ctr_name = "tx_errors_phy", + }, + { + .dpdk_name = "rx_out_of_buffer", + .ctr_name = "out_of_buffer", + }, +}; + +static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); + +/** + * Read device counters table. + * + * @param priv + * Pointer to private structure. + * @param[out] stats + * Counters table output buffer. + * + * @return + * 0 on success and stats is filled, negative on error. + */ +static int +priv_read_dev_counters(struct priv *priv, uint64_t *stats) +{ + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + struct ifreq ifr; + unsigned int stats_sz = (xstats_ctrl->stats_n * sizeof(uint64_t)) + + sizeof(struct ethtool_stats); + struct ethtool_stats et_stats[(stats_sz + ( + sizeof(struct ethtool_stats) - 1)) / + sizeof(struct ethtool_stats)]; + + et_stats->cmd = ETHTOOL_GSTATS; + et_stats->n_stats = xstats_ctrl->stats_n; + ifr.ifr_data = (caddr_t)et_stats; + if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + WARN("unable to read statistic values from device"); + return -1; + } + for (i = 0; i != xstats_n; ++i) { + if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name)) + priv_get_cntr_sysfs(priv, + mlx5_counters_init[i].ctr_name, + &stats[i]); + else + stats[i] = (uint64_t) + et_stats->data[xstats_ctrl->dev_table_idx[i]]; + } + return 0; +} + +/** + * Query the number of statistics provided by ETHTOOL. + * + * @param priv + * Pointer to private structure. + * + * @return + * Number of statistics on success, -1 on error. + */ +static int +priv_ethtool_get_stats_n(struct priv *priv) { + struct ethtool_drvinfo drvinfo; + struct ifreq ifr; + + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (caddr_t)&drvinfo; + if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + WARN("unable to query number of statistics"); + return -1; + } + return drvinfo.n_stats; +} + +/** + * Init the structures to read device counters. + * + * @param priv + * Pointer to private structure. 
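
priv_ethtool_get_stats_n() and priv_read_dev_counters() above are thin wrappers over the SIOCETHTOOL ioctl that priv_ifreq() issues on the port's netdev. The same counters can be read from any process through the plain kernel interface; a standalone sketch (the interface name is a placeholder):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int
main(void)
{
        const char *ifname = "eth0"; /* placeholder netdev name */
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct ifreq ifr;
        struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_stats *stats;

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
        ifr.ifr_data = (void *)&drvinfo;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;
        stats = calloc(1, sizeof(*stats) +
                       drvinfo.n_stats * sizeof(stats->data[0]));
        if (!stats)
                return 1;
        stats->cmd = ETHTOOL_GSTATS;
        stats->n_stats = drvinfo.n_stats;
        ifr.ifr_data = (void *)stats;
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("%u counters read\n", stats->n_stats);
        free(stats);
        close(fd);
        return 0;
}
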
+ */ +void +priv_xstats_init(struct priv *priv) +{ + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + unsigned int j; + struct ifreq ifr; + struct ethtool_gstrings *strings = NULL; + unsigned int dev_stats_n; + unsigned int str_sz; + + dev_stats_n = priv_ethtool_get_stats_n(priv); + if (dev_stats_n < 1) { + WARN("no extended statistics available"); + return; + } + xstats_ctrl->stats_n = dev_stats_n; + /* Allocate memory to grab stat names and values. */ + str_sz = dev_stats_n * ETH_GSTRING_LEN; + strings = (struct ethtool_gstrings *) + rte_malloc("xstats_strings", + str_sz + sizeof(struct ethtool_gstrings), 0); + if (!strings) { + WARN("unable to allocate memory for xstats"); + return; + } + strings->cmd = ETHTOOL_GSTRINGS; + strings->string_set = ETH_SS_STATS; + strings->len = dev_stats_n; + ifr.ifr_data = (caddr_t)strings; + if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { + WARN("unable to get statistic names"); + goto free; + } + for (j = 0; j != xstats_n; ++j) + xstats_ctrl->dev_table_idx[j] = dev_stats_n; + for (i = 0; i != dev_stats_n; ++i) { + const char *curr_string = (const char *) + &strings->data[i * ETH_GSTRING_LEN]; + + for (j = 0; j != xstats_n; ++j) { + if (!strcmp(mlx5_counters_init[j].ctr_name, + curr_string)) { + xstats_ctrl->dev_table_idx[j] = i; + break; + } + } + } + for (j = 0; j != xstats_n; ++j) { + if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name)) + continue; + if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) { + WARN("counter \"%s\" is not recognized", + mlx5_counters_init[j].dpdk_name); + goto free; + } + } + /* Copy to base at first time. */ + assert(xstats_n <= MLX5_MAX_XSTATS); + priv_read_dev_counters(priv, xstats_ctrl->base); +free: + rte_free(strings); +} + +/** + * Get device extended statistics. + * + * @param priv + * Pointer to private structure. + * @param[out] stats + * Pointer to rte extended stats table. + * + * @return + * Number of extended stats on success and stats is filled, + * negative on error. + */ +static int +priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats) +{ + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + unsigned int n = xstats_n; + uint64_t counters[n]; + + if (priv_read_dev_counters(priv, counters) < 0) + return -1; + for (i = 0; i != xstats_n; ++i) { + stats[i].id = i; + stats[i].value = (counters[i] - xstats_ctrl->base[i]); + } + return n; +} + +/** + * Reset device extended statistics. + * + * @param priv + * Pointer to private structure. + */ +static void +priv_xstats_reset(struct priv *priv) +{ + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + unsigned int n = xstats_n; + uint64_t counters[n]; + + if (priv_read_dev_counters(priv, counters) < 0) + return; + for (i = 0; i != n; ++i) + xstats_ctrl->base[i] = counters[i]; +} + /** * DPDK callback to get device statistics. * @@ -142,3 +415,95 @@ mlx5_stats_reset(struct rte_eth_dev *dev) #endif priv_unlock(priv); } + +/** + * DPDK callback to get extended device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] stats + * Stats table output buffer. + * @param n + * The size of the stats table. + * + * @return + * Number of xstats on success, negative on failure. 
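
Applications see these counters through the generic xstats API, with the names taken from the dpdk_name column of the table earlier in this patch. A hedged usage sketch against the 17.05-era ethdev API (port id is a placeholder; the zero-sized first call is the usual way to query the required count):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint8_t port_id)
{
        int n = rte_eth_xstats_get_names(port_id, NULL, 0);
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *values;
        int i;

        if (n <= 0)
                return;
        names = calloc(n, sizeof(*names));
        values = calloc(n, sizeof(*values));
        if (names && values &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, values, n) == n) {
                for (i = 0; i < n; i++)
                        printf("%s: %" PRIu64 "\n",
                               names[i].name, values[i].value);
        }
        free(names);
        free(values);
}
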
+ */ +int +mlx5_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, unsigned int n) +{ + struct priv *priv = mlx5_get_priv(dev); + int ret = xstats_n; + + if (n >= xstats_n && stats) { + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + + priv_lock(priv); + stats_n = priv_ethtool_get_stats_n(priv); + if (stats_n < 0) + return -1; + if (xstats_ctrl->stats_n != stats_n) + priv_xstats_init(priv); + ret = priv_xstats_get(priv, stats); + priv_unlock(priv); + } + return ret; +} + +/** + * DPDK callback to clear device extended statistics. + * + * @param dev + * Pointer to Ethernet device structure. + */ +void +mlx5_xstats_reset(struct rte_eth_dev *dev) +{ + struct priv *priv = mlx5_get_priv(dev); + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + + priv_lock(priv); + stats_n = priv_ethtool_get_stats_n(priv); + if (stats_n < 0) + return; + if (xstats_ctrl->stats_n != stats_n) + priv_xstats_init(priv); + priv_xstats_reset(priv); + priv_unlock(priv); +} + +/** + * DPDK callback to retrieve names of extended device statistics + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] xstats_names + * Buffer to insert names into. + * @param n + * Number of names. + * + * @return + * Number of xstats names. + */ +int +mlx5_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned int n) +{ + struct priv *priv = mlx5_get_priv(dev); + unsigned int i; + + if (n >= xstats_n && xstats_names) { + priv_lock(priv); + for (i = 0; i != xstats_n; ++i) { + strncpy(xstats_names[i].name, + mlx5_counters_init[i].dpdk_name, + RTE_ETH_XSTATS_NAME_SIZE); + xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; + } + priv_unlock(priv); + } + return xstats_n; +} diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index d4dccd88..8c5aa691 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -82,14 +82,33 @@ mlx5_dev_start(struct rte_eth_dev *dev) ERROR("%p: an error occurred while configuring hash RX queues:" " %s", (void *)priv, strerror(err)); - /* Rollback. */ - priv_special_flow_disable_all(priv); - priv_mac_addrs_disable(priv); - priv_destroy_hash_rxqs(priv); + goto error; } if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) priv_fdir_enable(priv); + err = priv_flow_start(priv); + if (err) { + priv->started = 0; + ERROR("%p: an error occurred while configuring flows:" + " %s", + (void *)priv, strerror(err)); + goto error; + } priv_dev_interrupt_handler_install(priv, dev); + if (dev->data->dev_conf.intr_conf.rxq) { + err = priv_intr_efd_enable(priv); + if (!err) + err = priv_create_intr_vec(priv); + } + priv_xstats_init(priv); + priv_unlock(priv); + return 0; +error: + /* Rollback. 
*/ + priv_special_flow_disable_all(priv); + priv_mac_addrs_disable(priv); + priv_destroy_hash_rxqs(priv); + priv_flow_stop(priv); priv_unlock(priv); return -err; } @@ -120,7 +139,12 @@ mlx5_dev_stop(struct rte_eth_dev *dev) priv_mac_addrs_disable(priv); priv_destroy_hash_rxqs(priv); priv_fdir_disable(priv); + priv_flow_stop(priv); priv_dev_interrupt_handler_uninstall(priv, dev); + if (priv->dev->data->dev_conf.intr_conf.rxq) { + priv_destroy_intr_vec(priv); + priv_intr_efd_disable(priv); + } priv->started = 0; priv_unlock(priv); } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 439908fc..de7e28be 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -82,7 +82,9 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n) for (i = 0; (i != elts_n); ++i) (*txq_ctrl->txq.elts)[i] = NULL; for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) { - volatile struct mlx5_wqe64 *wqe = &(*txq_ctrl->txq.wqes)[i]; + volatile struct mlx5_wqe64 *wqe = + (volatile struct mlx5_wqe64 *) + txq_ctrl->txq.wqes + i; memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe)); } @@ -138,48 +140,14 @@ txq_free_elts(struct txq_ctrl *txq_ctrl) void txq_cleanup(struct txq_ctrl *txq_ctrl) { - struct ibv_exp_release_intf_params params; size_t i; DEBUG("cleaning up %p", (void *)txq_ctrl); txq_free_elts(txq_ctrl); - if (txq_ctrl->if_qp != NULL) { - assert(txq_ctrl->priv != NULL); - assert(txq_ctrl->priv->ctx != NULL); - assert(txq_ctrl->qp != NULL); - params = (struct ibv_exp_release_intf_params){ - .comp_mask = 0, - }; - claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx, - txq_ctrl->if_qp, - ¶ms)); - } - if (txq_ctrl->if_cq != NULL) { - assert(txq_ctrl->priv != NULL); - assert(txq_ctrl->priv->ctx != NULL); - assert(txq_ctrl->cq != NULL); - params = (struct ibv_exp_release_intf_params){ - .comp_mask = 0, - }; - claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx, - txq_ctrl->if_cq, - ¶ms)); - } if (txq_ctrl->qp != NULL) claim_zero(ibv_destroy_qp(txq_ctrl->qp)); if (txq_ctrl->cq != NULL) claim_zero(ibv_destroy_cq(txq_ctrl->cq)); - if (txq_ctrl->rd != NULL) { - struct ibv_exp_destroy_res_domain_attr attr = { - .comp_mask = 0, - }; - - assert(txq_ctrl->priv != NULL); - assert(txq_ctrl->priv->ctx != NULL); - claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx, - txq_ctrl->rd, - &attr)); - } for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) { if (txq_ctrl->txq.mp2mr[i].mp == NULL) break; @@ -214,9 +182,7 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl) } tmpl->txq.cqe_n = log2above(ibcq->cqe); tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8; - tmpl->txq.wqes = - (volatile struct mlx5_wqe64 (*)[]) - (uintptr_t)qp->gen_data.sqstart; + tmpl->txq.wqes = qp->gen_data.sqstart; tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt); tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR]; tmpl->txq.bf_reg = qp->gen_data.bf->reg; @@ -258,14 +224,15 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, .socket = socket, }; union { - struct ibv_exp_query_intf_params params; struct ibv_exp_qp_init_attr init; - struct ibv_exp_res_domain_init_attr rd; struct ibv_exp_cq_init_attr cq; struct ibv_exp_qp_attr mod; struct ibv_exp_cq_attr cq_attr; } attr; - enum ibv_exp_query_intf_status status; + unsigned int cqe_n; + const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER + + (RTE_CACHE_LINE_SIZE - 1)) / + RTE_CACHE_LINE_SIZE); int ret = 0; if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { @@ -276,27 +243,18 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl 
*txq_ctrl, (void)conf; /* Thresholds configuration (ignored). */ assert(desc > MLX5_TX_COMP_THRESH); tmpl.txq.elts_n = log2above(desc); + if (priv->mps == MLX5_MPW_ENHANCED) + tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg; /* MRs will be registered in mp2mr[] later. */ - attr.rd = (struct ibv_exp_res_domain_init_attr){ - .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | - IBV_EXP_RES_DOMAIN_MSG_MODEL), - .thread_model = IBV_EXP_THREAD_SINGLE, - .msg_model = IBV_EXP_MSG_HIGH_BW, - }; - tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); - if (tmpl.rd == NULL) { - ret = ENOMEM; - ERROR("%p: RD creation failure: %s", - (void *)dev, strerror(ret)); - goto error; - } attr.cq = (struct ibv_exp_cq_init_attr){ - .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, - .res_domain = tmpl.rd, + .comp_mask = 0, }; + cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ? + ((desc / MLX5_TX_COMP_THRESH) - 1) : 1; + if (priv->mps == MLX5_MPW_ENHANCED) + cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV; tmpl.cq = ibv_exp_create_cq(priv->ctx, - (((desc / MLX5_TX_COMP_THRESH) - 1) ? - ((desc / MLX5_TX_COMP_THRESH) - 1) : 1), + cqe_n, NULL, NULL, 0, &attr.cq); if (tmpl.cq == NULL) { ret = ENOMEM; @@ -332,17 +290,52 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, * TX burst. */ .sq_sig_all = 0, .pd = priv->pd, - .res_domain = tmpl.rd, - .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | - IBV_EXP_QP_INIT_ATTR_RES_DOMAIN), + .comp_mask = IBV_EXP_QP_INIT_ATTR_PD, }; if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) { tmpl.txq.max_inline = ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) / RTE_CACHE_LINE_SIZE); - attr.init.cap.max_inline_data = - tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE; + tmpl.txq.inline_en = 1; + /* TSO and MPS can't be enabled concurrently. */ + assert(!priv->tso || !priv->mps); + if (priv->mps == MLX5_MPW_ENHANCED) { + tmpl.txq.inline_max_packet_sz = + priv->inline_max_packet_sz; + /* To minimize the size of data set, avoid requesting + * too large WQ. + */ + attr.init.cap.max_inline_data = + ((RTE_MIN(priv->txq_inline, + priv->inline_max_packet_sz) + + (RTE_CACHE_LINE_SIZE - 1)) / + RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE; + } else if (priv->tso) { + int inline_diff = tmpl.txq.max_inline - max_tso_inline; + + /* + * Adjust inline value as Verbs aggregates + * tso_inline and txq_inline fields. + */ + attr.init.cap.max_inline_data = inline_diff > 0 ? + inline_diff * + RTE_CACHE_LINE_SIZE : + 0; + } else { + attr.init.cap.max_inline_data = + tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE; + } } + if (priv->tso) { + attr.init.max_tso_header = + max_tso_inline * RTE_CACHE_LINE_SIZE; + attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER; + tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline, + max_tso_inline); + tmpl.txq.tso_en = 1; + } + if (priv->tunnel_en) + tmpl.txq.tunnel_en = 1; tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init); if (tmpl.qp == NULL) { ret = (errno ? 
errno : EINVAL); @@ -391,36 +384,6 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, (void *)dev, strerror(ret)); goto error; } - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_CQ, - .obj = tmpl.cq, - }; - tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_cq == NULL) { - ret = EINVAL; - ERROR("%p: CQ interface family query failed with status %d", - (void *)dev, status); - goto error; - } - attr.params = (struct ibv_exp_query_intf_params){ - .intf_scope = IBV_EXP_INTF_GLOBAL, - .intf = IBV_EXP_INTF_QP_BURST, - .intf_version = 1, - .obj = tmpl.qp, - /* Enable multi-packet send if supported. */ - .family_flags = - ((priv->mps && !priv->sriov) ? - IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR : - 0), - }; - tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status); - if (tmpl.if_qp == NULL) { - ret = EINVAL; - ERROR("%p: QP interface family query failed with status %d", - (void *)dev, status); - goto error; - } /* Clean up txq in case we're reinitializing it. */ DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl); txq_cleanup(txq_ctrl); @@ -496,6 +459,19 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } (*priv->txqs)[idx] = NULL; txq_cleanup(txq_ctrl); + /* Resize if txq size is changed. */ + if (txq_ctrl->txq.elts_n != log2above(desc)) { + txq_ctrl = rte_realloc(txq_ctrl, + sizeof(*txq_ctrl) + + desc * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE); + if (!txq_ctrl) { + ERROR("%p: unable to reallocate queue index %u", + (void *)dev, idx); + priv_unlock(priv); + return -ENOMEM; + } + } } else { txq_ctrl = rte_calloc_socket("TXQ", 1, |
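
The TX queue sizing in the last hunks combines a few rules: the CQ gets roughly one entry per MLX5_TX_COMP_THRESH descriptors plus extra slack when Enhanced MPW is active, and the Verbs max_inline_data request is clamped to the per-packet inline limit for Enhanced MPW (or reduced by the TSO header room when TSO is enabled). A standalone sketch of the Enhanced MPW case with assumed constant values:

#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE            64u  /* assumed RTE_CACHE_LINE_SIZE */
#define COMP_THRESH           32u  /* assumed MLX5_TX_COMP_THRESH */
#define COMP_THRESH_INL_DIV    8u  /* assumed MLX5_TX_COMP_THRESH_INLINE_DIV */

int
main(void)
{
        unsigned int desc = 512;                 /* TX ring size */
        unsigned int txq_inline = 256;           /* txq_inline argument */
        unsigned int inline_max_packet_sz = 128; /* txq_max_inline_len */
        int enhanced_mpw = 1;
        unsigned int cqe_n, max_inline_data;

        cqe_n = (desc / COMP_THRESH) - 1 ? (desc / COMP_THRESH) - 1 : 1;
        if (enhanced_mpw)
                cqe_n += COMP_THRESH_INL_DIV;
        /* For Enhanced MPW, never ask Verbs for more inline room than one
         * inlined packet can use. */
        max_inline_data = txq_inline < inline_max_packet_sz ?
                          txq_inline : inline_max_packet_sz;
        max_inline_data = ((max_inline_data + CACHE_LINE - 1) / CACHE_LINE) *
                          CACHE_LINE;
        printf("cqe_n=%u max_inline_data=%u\n", cqe_n, max_inline_data);
        return 0;
}
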