Diffstat (limited to 'drivers/net/mlx5/mlx5.c')
 drivers/net/mlx5/mlx5.c | 200
 1 file changed, 145 insertions(+), 55 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 041cfc33..5aa4adc6 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -37,6 +37,7 @@
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
+#include <errno.h>
#include <net/if.h>
/* Verbs header. */
@@ -57,6 +58,7 @@
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_common.h>
+#include <rte_kvargs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -67,6 +69,21 @@
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+/* Device parameter to enable RX completion queue compression. */
+#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
+
+/* Device parameter to configure inline send. */
+#define MLX5_TXQ_INLINE "txq_inline"
+
+/*
+ * Device parameter to configure the number of TX queues threshold for
+ * enabling inline send.
+ */
+#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
+
+/* Device parameter to enable multi-packet send WQEs. */
+#define MLX5_TXQ_MPW_EN "txq_mpw_en"
+
/**
* Retrieve integer value from environment variable.
*
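
Note: the four keys above arrive through the PCI device's devargs at probe time, i.e. as comma-separated key=value pairs appended to the device address on the EAL command line (for instance something like -w 0000:05:00.0,txq_inline=128,txq_mpw_en=0, where the address and values are purely illustrative). They are parsed by mlx5_args() further below.
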
@@ -98,7 +115,6 @@ static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
struct priv *priv = mlx5_get_priv(dev);
- void *tmp;
unsigned int i;
priv_lock(priv);
@@ -122,12 +138,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i) {
- tmp = (*priv->rxqs)[i];
- if (tmp == NULL)
+ struct rxq *rxq = (*priv->rxqs)[i];
+ struct rxq_ctrl *rxq_ctrl;
+
+ if (rxq == NULL)
continue;
+ rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
(*priv->rxqs)[i] = NULL;
- rxq_cleanup(tmp);
- rte_free(tmp);
+ rxq_cleanup(rxq_ctrl);
+ rte_free(rxq_ctrl);
}
priv->rxqs_n = 0;
priv->rxqs = NULL;
@@ -136,12 +155,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->txqs_n); ++i) {
- tmp = (*priv->txqs)[i];
- if (tmp == NULL)
+ struct txq *txq = (*priv->txqs)[i];
+ struct txq_ctrl *txq_ctrl;
+
+ if (txq == NULL)
continue;
+ txq_ctrl = container_of(txq, struct txq_ctrl, txq);
(*priv->txqs)[i] = NULL;
- txq_cleanup(tmp);
- rte_free(tmp);
+ txq_cleanup(txq_ctrl);
+ rte_free(txq_ctrl);
}
priv->txqs_n = 0;
priv->txqs = NULL;
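
Note: the rewritten cleanup loops rely on the data-path structures (struct rxq, struct txq) being embedded inside larger control structures, so container_of() recovers the pointer that was actually allocated before handing it to rxq_cleanup()/txq_cleanup() and rte_free(). A minimal standalone sketch of that idiom follows; the struct layouts are invented for illustration and differ from the real rxq_ctrl/txq_ctrl definitions in mlx5_rxtx.h.

    /*
     * Minimal sketch of the container_of() idiom used above.
     * Struct layouts are invented for illustration only.
     */
    #include <stddef.h>
    #include <stdio.h>

    #ifndef container_of
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #endif

    struct rxq {                /* data-path part, what (*priv->rxqs)[i] points to */
            unsigned int elts_n;
    };

    struct rxq_ctrl {           /* control part embedding the data-path part */
            int socket;
            struct rxq rxq;
    };

    int
    main(void)
    {
            struct rxq_ctrl ctrl = { .socket = 0, .rxq = { .elts_n = 256 } };
            struct rxq *rxq = &ctrl.rxq;
            /* Recover the enclosing control structure from the embedded member. */
            struct rxq_ctrl *back = container_of(rxq, struct rxq_ctrl, rxq);

            printf("%s\n", (back == &ctrl) ? "same object" : "bug");
            return 0;
    }
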
@@ -190,17 +212,13 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
.mtu_set = mlx5_dev_set_mtu,
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
.reta_update = mlx5_dev_rss_reta_update,
.reta_query = mlx5_dev_rss_reta_query,
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
-#ifdef MLX5_FDIR_SUPPORT
.filter_ctrl = mlx5_dev_filter_ctrl,
-#endif /* MLX5_FDIR_SUPPORT */
};
static struct {
@@ -236,6 +254,90 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
return ret;
}
+/**
+ * Verify and store value for device argument.
+ *
+ * @param[in] key
+ * Key argument to verify.
+ * @param[in] val
+ * Value associated with key.
+ * @param opaque
+ * User data.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx5_args_check(const char *key, const char *val, void *opaque)
+{
+ struct priv *priv = opaque;
+ unsigned long tmp;
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ WARN("%s: \"%s\" is not a valid integer", key, val);
+ return errno;
+ }
+ if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
+ priv->cqe_comp = !!tmp;
+ } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
+ priv->txq_inline = tmp;
+ } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
+ priv->txqs_inline = tmp;
+ } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
+ priv->mps = !!tmp;
+ } else {
+ WARN("%s: unknown parameter", key);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Parse device parameters.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param devargs
+ * Device arguments structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+mlx5_args(struct priv *priv, struct rte_devargs *devargs)
+{
+ const char **params = (const char *[]){
+ MLX5_RXQ_CQE_COMP_EN,
+ MLX5_TXQ_INLINE,
+ MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQ_MPW_EN,
+ NULL,
+ };
+ struct rte_kvargs *kvlist;
+ int ret = 0;
+ int i;
+
+ if (devargs == NULL)
+ return 0;
+ /* Following UGLY cast is done to pass checkpatch. */
+ kvlist = rte_kvargs_parse(devargs->args, params);
+ if (kvlist == NULL)
+ return 0;
+ /* Process parameters. */
+ for (i = 0; (params[i] != NULL); ++i) {
+ if (rte_kvargs_count(kvlist, params[i])) {
+ ret = rte_kvargs_process(kvlist, params[i],
+ mlx5_args_check, priv);
+ if (ret != 0)
+ return ret;
+ }
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
static struct eth_driver mlx5_driver;
/**
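
Note: mlx5_args_check() and mlx5_args() are plain rte_kvargs usage: parse devargs->args against a NULL-terminated key list, then invoke a handler once per occurrence of each known key. Two details worth noting in the version above: mlx5_args() returns without rte_kvargs_free() when rte_kvargs_process() fails, and mlx5_args_check() returns a positive errno for bad integers but -EINVAL for unknown keys even though its comment promises a negative errno value. Below is a minimal, self-contained sketch of the same rte_kvargs flow; the key list and argument string are made up for illustration.

    /*
     * Standalone sketch of the rte_kvargs flow used by mlx5_args().
     * Keys and the argument string are illustrative only.
     */
    #include <stdio.h>
    #include <rte_kvargs.h>

    static int
    show_arg(const char *key, const char *val, void *opaque)
    {
            (void)opaque;
            printf("%s = %s\n", key, val);
            return 0;
    }

    int
    main(void)
    {
            static const char *const keys[] = { "txq_inline", "txq_mpw_en", NULL };
            struct rte_kvargs *kvlist;
            unsigned int i;
            int ret = 0;

            /* Same shape as pci_dev->devargs->args at probe time. */
            kvlist = rte_kvargs_parse("txq_inline=128,txq_mpw_en=0", keys);
            if (kvlist == NULL)
                    return 1;
            for (i = 0; (keys[i] != NULL) && (ret == 0); ++i)
                    if (rte_kvargs_count(kvlist, keys[i]))
                            ret = rte_kvargs_process(kvlist, keys[i],
                                                     show_arg, NULL);
            rte_kvargs_free(kvlist); /* freed on both success and failure */
            return ret;
    }
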
@@ -260,7 +362,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int err = 0;
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
- unsigned int vf;
+ unsigned int sriov;
unsigned int mps;
int idx;
int i;
@@ -303,17 +405,17 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(pci_dev->addr.devid != pci_addr.devid) ||
(pci_dev->addr.function != pci_addr.function))
continue;
- vf = ((pci_dev->id.device_id ==
+ sriov = ((pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
(pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
/* Multi-packet send is only supported by ConnectX-4 Lx PF. */
mps = (pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
- INFO("PCI information matches, using device \"%s\" (VF: %s,"
- " MPS: %s)",
+ INFO("PCI information matches, using device \"%s\""
+ " (SR-IOV: %s, MPS: %s)",
list[i]->name,
- vf ? "true" : "false",
+ sriov ? "true" : "false",
mps ? "true" : "false");
attr_ctx = ibv_open_device(list[i]);
err = errno;
@@ -347,23 +449,16 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
struct rte_eth_dev *eth_dev;
-#ifdef HAVE_EXP_QUERY_DEVICE
struct ibv_exp_device_attr exp_device_attr;
-#endif /* HAVE_EXP_QUERY_DEVICE */
struct ether_addr mac;
+ uint16_t num_vfs = 0;
-#ifdef HAVE_EXP_QUERY_DEVICE
exp_device_attr.comp_mask =
IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
IBV_EXP_DEVICE_ATTR_RX_HASH |
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-#ifdef HAVE_VERBS_RX_END_PADDING
IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
-#endif /* HAVE_VERBS_RX_END_PADDING */
0;
-#endif /* HAVE_EXP_QUERY_DEVICE */
DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -414,7 +509,14 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
-#ifdef HAVE_EXP_QUERY_DEVICE
+ priv->mps = mps; /* Enable MPW by default if supported. */
+ priv->cqe_comp = 1; /* Enable compression by default. */
+ err = mlx5_args(priv, pci_dev->devargs);
+ if (err) {
+ ERROR("failed to process device arguments: %s",
+ strerror(err));
+ goto port_error;
+ }
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
goto port_error;
@@ -440,32 +542,28 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
DEBUG("maximum RX indirection table size is %u",
priv->ind_table_max_size);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
DEBUG("VLAN stripping is %ssupported",
(priv->hw_vlan_strip ? "" : "not "));
-#ifdef HAVE_VERBS_FCS
priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags &
IBV_EXP_DEVICE_SCATTER_FCS);
-#endif /* HAVE_VERBS_FCS */
DEBUG("FCS stripping configuration is %ssupported",
(priv->hw_fcs_strip ? "" : "not "));
-#ifdef HAVE_VERBS_RX_END_PADDING
priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;
-#endif /* HAVE_VERBS_RX_END_PADDING */
DEBUG("hardware RX end alignment padding is %ssupported",
(priv->hw_padding ? "" : "not "));
-#else /* HAVE_EXP_QUERY_DEVICE */
- priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
-#endif /* HAVE_EXP_QUERY_DEVICE */
-
- priv->vf = vf;
- priv->mps = mps;
+ priv_get_num_vfs(priv, &num_vfs);
+ priv->sriov = (num_vfs || sriov);
+ if (priv->mps && !mps) {
+ ERROR("multi-packet send not supported on this device"
+ " (" MLX5_TXQ_MPW_EN ")");
+ err = ENOTSUP;
+ goto port_error;
+ }
/* Allocate and register default RSS hash keys. */
priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
sizeof((*priv->rss_conf)[0]), 0);
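
Note: the MPS handling across the last two hunks is a default/override/validate sequence: priv->mps starts from the capability inferred from the PCI ID (mps), the txq_mpw_en devarg may override it, and requesting MPS on hardware that lacks it aborts the probe with ENOTSUP instead of being silently ignored. A simplified, self-contained sketch of that sequence follows; the helper name and its arguments are invented for illustration.

    /*
     * Simplified sketch of the txq_mpw_en default/override/validate flow.
     * Names only loosely follow the driver.
     */
    #include <errno.h>
    #include <stdio.h>

    static int
    configure_mps(int hw_mps, int devarg_set, int devarg_val, int *mps)
    {
            *mps = hw_mps;                 /* default: enabled iff hardware has it */
            if (devarg_set)
                    *mps = !!devarg_val;   /* txq_mpw_en overrides the default */
            if (*mps && !hw_mps)
                    return ENOTSUP;        /* forcing MPS on unsupported HW fails probe */
            return 0;
    }

    int
    main(void)
    {
            int ret, mps;

            ret = configure_mps(0, 1, 1, &mps);
            printf("force on unsupported HW: ret=%d\n", ret);      /* ret == ENOTSUP */
            ret = configure_mps(1, 0, 0, &mps);
            printf("default on capable HW: ret=%d mps=%d\n", ret, mps); /* 0, 1 */
            ret = configure_mps(1, 1, 0, &mps);
            printf("disabled by user: ret=%d mps=%d\n", ret, mps);      /* 0, 0 */
            return 0;
    }
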
@@ -608,28 +706,20 @@ error:
static const struct rte_pci_id mlx5_pci_id_map[] = {
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4VF,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LX,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
},
{
.vendor_id = 0