From 6e7cbd63706f3435b9d9a2057a37db1da01db9a7 Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Mon, 3 Sep 2018 10:46:47 +0100
Subject: New upstream version 17.11.4

Change-Id: Icb6b9664e7c4adb85c087844abe6e54d6ec32db6
Signed-off-by: Luca Boccassi
---
 drivers/net/mlx4/mlx4.c     | 44 +++++++++++++++++++++++++++-----------------
 drivers/net/mlx4/mlx4.h     |  1 +
 drivers/net/mlx4/mlx4_rxq.c |  6 ++++++
 3 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 4d7bd5f0..d39ad3e0 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -434,6 +434,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	int err = 0;
 	struct ibv_context *attr_ctx = NULL;
 	struct ibv_device_attr device_attr;
+	struct ibv_device_attr_ex device_attr_ex;
 	struct mlx4_conf conf = {
 		.ports.present = 0,
 	};
@@ -494,19 +495,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	ibv_dev = list[i];
 	DEBUG("device opened");
 	if (ibv_query_device(attr_ctx, &device_attr)) {
-		rte_errno = ENODEV;
+		err = ENODEV;
 		goto error;
 	}
 	INFO("%u port(s) detected", device_attr.phys_port_cnt);
 	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
 	if (mlx4_args(pci_dev->device.devargs, &conf)) {
 		ERROR("failed to process device arguments");
-		rte_errno = EINVAL;
+		err = EINVAL;
 		goto error;
 	}
 	/* Use all ports when none are defined */
 	if (!conf.ports.enabled)
 		conf.ports.enabled = conf.ports.present;
+	/* Retrieve extended device attributes. */
+	if (ibv_query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
+		err = ENODEV;
+		goto error;
+	}
 	for (i = 0; i < device_attr.phys_port_cnt; i++) {
 		uint32_t port = i + 1; /* ports are indexed from one */
 		struct ibv_context *ctx = NULL;
@@ -522,18 +528,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		DEBUG("using port %u", port);
 		ctx = ibv_open_device(ibv_dev);
 		if (ctx == NULL) {
-			rte_errno = ENODEV;
+			err = ENODEV;
 			goto port_error;
 		}
 		/* Check port status. */
 		err = ibv_query_port(ctx, port, &port_attr);
 		if (err) {
-			rte_errno = err;
-			ERROR("port query failed: %s", strerror(rte_errno));
+			err = ENODEV;
+			ERROR("port query failed: %s", strerror(err));
 			goto port_error;
 		}
 		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
-			rte_errno = ENOTSUP;
+			err = ENOTSUP;
 			ERROR("port %d is not configured in Ethernet mode",
 			      port);
 			goto port_error;
@@ -543,15 +549,16 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			      port, ibv_port_state_str(port_attr.state),
 			      port_attr.state);
 		/* Make asynchronous FD non-blocking to handle interrupts. */
-		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
+		err = mlx4_fd_set_non_blocking(ctx->async_fd);
+		if (err) {
 			ERROR("cannot make asynchronous FD non-blocking: %s",
-			      strerror(rte_errno));
+			      strerror(err));
 			goto port_error;
 		}
 		/* Allocate protection domain. */
 		pd = ibv_alloc_pd(ctx);
 		if (pd == NULL) {
-			rte_errno = ENOMEM;
+			err = ENOMEM;
 			ERROR("PD allocation failure");
 			goto port_error;
 		}
@@ -560,7 +567,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 				   sizeof(*priv),
 				   RTE_CACHE_LINE_SIZE);
 		if (priv == NULL) {
-			rte_errno = ENOMEM;
+			err = ENOMEM;
 			ERROR("priv allocation failure");
 			goto port_error;
 		}
@@ -581,10 +588,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			   PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
 		DEBUG("L2 tunnel checksum offloads are %ssupported",
 		      (priv->hw_csum_l2tun ? "" : "not "));
+		priv->hw_rss_max_qps =
+			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
+		DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
 		/* Configure the first MAC address by default. */
-		if (mlx4_get_mac(priv, &mac.addr_bytes)) {
+		err = mlx4_get_mac(priv, &mac.addr_bytes);
+		if (err) {
 			ERROR("cannot get MAC address, is mlx4_en loaded?"
-			      " (rte_errno: %s)", strerror(rte_errno));
+			      " (error: %s)", strerror(err));
 			goto port_error;
 		}
 		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -617,8 +628,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			eth_dev = rte_eth_dev_allocate(name);
 		}
 		if (eth_dev == NULL) {
+			err = ENOMEM;
 			ERROR("can not allocate rte ethdev");
-			rte_errno = ENOMEM;
 			goto port_error;
 		}
 		eth_dev->data->dev_private = priv;
@@ -663,8 +674,6 @@ port_error:
 			rte_eth_dev_release_port(eth_dev);
 		break;
 	}
-	if (i == device_attr.phys_port_cnt)
-		return 0;
 	/*
 	 * XXX if something went wrong in the loop above, there is a resource
 	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
@@ -676,8 +685,9 @@ error:
 		claim_zero(ibv_close_device(attr_ctx));
 	if (list)
 		ibv_free_device_list(list);
-	assert(rte_errno >= 0);
-	return -rte_errno;
+	if (err)
+		rte_errno = err;
+	return -err;
 }
 
 static const struct rte_pci_id mlx4_pci_id_map[] = {
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 41d652ba..30f12eba 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -129,6 +129,7 @@ struct priv {
 	uint32_t rss_init:1; /**< Common RSS context is initialized. */
 	uint32_t hw_csum:1; /* Checksum offload is supported. */
 	uint32_t hw_csum_l2tun:1; /* Checksum support for L2 tunnels. */
+	uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */
 	struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
 	struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
 	LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 06030c2c..b3f70d6c 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -365,6 +365,12 @@ mlx4_rss_init(struct priv *priv)
 
 	if (priv->rss_init)
 		return 0;
+	if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+		ERROR("RSS does not support more than %d queues",
+		      priv->hw_rss_max_qps);
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
 	/* Prepare range for RSS contexts before creating the first WQ. */
 	ret = mlx4dv_set_context_attr(priv->ctx,
 				      MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,