Diffstat (limited to 'drivers/net/mlx5')
 drivers/net/mlx5/mlx5.c               |  31
 drivers/net/mlx5/mlx5.h               |  18
 drivers/net/mlx5/mlx5_defs.h          |   8
 drivers/net/mlx5/mlx5_ethdev.c        | 186
 drivers/net/mlx5/mlx5_flow.c          | 252
 drivers/net/mlx5/mlx5_mac.c           |   6
 drivers/net/mlx5/mlx5_mr.c            |   5
 drivers/net/mlx5/mlx5_rss.c           |   6
 drivers/net/mlx5/mlx5_rxmode.c        |   8
 drivers/net/mlx5/mlx5_rxq.c           |   6
 drivers/net/mlx5/mlx5_rxtx.c          |  41
 drivers/net/mlx5/mlx5_rxtx.h          |  47
 drivers/net/mlx5/mlx5_rxtx_vec.c      |  25
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h |  38
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |  31
 drivers/net/mlx5/mlx5_stats.c         |   8
 drivers/net/mlx5/mlx5_trigger.c       |  42
 drivers/net/mlx5/mlx5_txq.c           |   8
 drivers/net/mlx5/mlx5_vlan.c          |   5
19 files changed, 380 insertions(+), 391 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0548d17a..45e0e8db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -158,7 +158,6 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
     size_t alignment = sysconf(_SC_PAGESIZE);

     assert(data != NULL);
-    assert(!mlx5_is_secondary());
     ret = rte_malloc_socket(__func__, size, alignment,
                 priv->dev->device->numa_node);
     DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
@@ -177,7 +176,6 @@ static void
 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
 {
     assert(data != NULL);
-    assert(!mlx5_is_secondary());
     DEBUG("Extern free request: %p", ptr);
     rte_free(ptr);
 }
@@ -662,6 +660,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
     INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);

     for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
+        char name[RTE_ETH_NAME_MAX_LEN];
         uint32_t port = i + 1; /* ports are indexed from one */
         uint32_t test = (1 << i);
         struct ibv_context *ctx = NULL;
@@ -685,14 +684,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
             .rx_vec_en = MLX5_ARG_UNSET,
         };

-        mlx5_dev[idx].ports |= test;
+        snprintf(name, sizeof(name), PCI_PRI_FMT,
+             pci_dev->addr.domain, pci_dev->addr.bus,
+             pci_dev->addr.devid, pci_dev->addr.function);

-        if (mlx5_is_secondary()) {
-            /* from rte_ethdev.c */
-            char name[RTE_ETH_NAME_MAX_LEN];
+        mlx5_dev[idx].ports |= test;

-            snprintf(name, sizeof(name), "%s port %u",
-                 ibv_get_device_name(ibv_dev), port);
+        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
             eth_dev = rte_eth_dev_attach_secondary(name);
             if (eth_dev == NULL) {
                 ERROR("can not attach rte ethdev");
@@ -802,7 +800,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
         priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
                      IBV_DEVICE_VXLAN_SUPPORT);
 #endif
-        DEBUG("L2 tunnel checksum offloads are %ssupported",
+        DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
               (priv->hw_csum_l2tun ? "" : "not "));

 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
@@ -902,14 +900,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
         priv_get_mtu(priv, &priv->mtu);
         DEBUG("port %u MTU is %u", priv->port, priv->mtu);

-        /* from rte_ethdev.c */
-        {
-            char name[RTE_ETH_NAME_MAX_LEN];
-
-            snprintf(name, sizeof(name), "%s port %u",
-                 ibv_get_device_name(ibv_dev), port);
-            eth_dev = rte_eth_dev_allocate(name);
-        }
+        eth_dev = rte_eth_dev_allocate(name);
         if (eth_dev == NULL) {
             ERROR("can not allocate rte ethdev");
             err = ENOMEM;
@@ -920,6 +911,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
         eth_dev->device = &pci_dev->device;
         rte_eth_copy_pci_info(eth_dev, pci_dev);
         eth_dev->device->driver = &mlx5_driver.driver;
+        /*
+         * Initialize burst functions to prevent crashes before link-up.
+         */
+        eth_dev->rx_pkt_burst = removed_rx_burst;
+        eth_dev->tx_pkt_burst = removed_tx_burst;
         priv->dev = eth_dev;
         eth_dev->dev_ops = &mlx5_dev_ops;
         /* Register MAC address. */
@@ -939,7 +935,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
         /* Bring Ethernet device up. */
         DEBUG("forcing Ethernet interface up");
         priv_set_flags(priv, ~IFF_UP, IFF_UP);
-        mlx5_link_update(priv->dev, 1);
         continue;

 port_error:
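The hunk above points eth_dev->rx_pkt_burst and eth_dev->tx_pkt_burst at stub functions until the first successful link-up, so an application that polls a queue too early reads "no packets" instead of dereferencing uninitialized queue state. A minimal sketch of such safe stubs; the names here are illustrative, not the driver's exact definitions:

    #include <rte_common.h>
    #include <rte_atomic.h>
    #include <rte_mbuf.h>

    /* Rx stub: report zero packets instead of touching unready queues. */
    static uint16_t
    stub_rx_burst(void *dpdk_rxq __rte_unused,
              struct rte_mbuf **pkts __rte_unused,
              uint16_t pkts_n __rte_unused)
    {
        rte_mb(); /* order against a concurrent callback swap */
        return 0;
    }

    /* Tx stub: accept nothing; callers retry once real callbacks are set. */
    static uint16_t
    stub_tx_burst(void *dpdk_txq __rte_unused,
              struct rte_mbuf **pkts __rte_unused,
              uint16_t pkts_n __rte_unused)
    {
        rte_mb();
        return 0;
    }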
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e6a69b82..d49595bc 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -165,6 +165,22 @@ priv_lock(struct priv *priv)
 }

 /**
+ * Try to lock private structure to protect it from concurrent access in the
+ * control path.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+priv_trylock(struct priv *priv)
+{
+    return rte_spinlock_trylock(&priv->lock);
+}
+
+/**
  * Unlock private structure.
  *
  * @param priv
@@ -194,6 +210,8 @@ int priv_set_flags(struct priv *, unsigned int, unsigned int);
 int mlx5_dev_configure(struct rte_eth_dev *);
 void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
 const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int priv_link_update(struct priv *, int);
+int priv_force_link_status_change(struct priv *, int);
 int mlx5_link_update(struct rte_eth_dev *, int);
 int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
 int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 3a7706cf..24caf7e7 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -34,6 +34,8 @@
 #ifndef RTE_PMD_MLX5_DEFS_H_
 #define RTE_PMD_MLX5_DEFS_H_

+#include <rte_ethdev.h>
+
 #include "mlx5_autoconf.h"

 /* Reported driver name. */
@@ -105,4 +107,10 @@
 /* Number of packets vectorized Rx can simultaneously process in a loop. */
 #define MLX5_VPMD_DESCS_PER_LOOP 4

+/* Supported RSS */
+#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+
+/* Maximum number of attempts to query link status before giving up. */
+#define MLX5_MAX_LINK_QUERY_ATTEMPTS 5
+
 #endif /* RTE_PMD_MLX5_DEFS_H_ */
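priv_trylock() exists so an alarm callback can back off instead of blocking: if the thread that holds the lock is simultaneously waiting in rte_eal_alarm_cancel() for this very callback to finish, a plain blocking lock would deadlock. A condensed sketch of the pattern, assuming a `pending` flag that the canceling thread clears before issuing the cancel:

    #include <rte_spinlock.h>
    #include <rte_pause.h>

    struct ctx {
        rte_spinlock_t lock;
        volatile int pending; /* set while the alarm is armed */
    };

    /* Alarm callback: never block while the alarm may be being canceled. */
    static void
    alarm_cb(void *arg)
    {
        struct ctx *c = arg;

        while (!rte_spinlock_trylock(&c->lock)) {
            if (c->pending == 0)
                return; /* canceler owns the lock; give up quietly */
            rte_pause();
        }
        c->pending = 0;
        /* ... do the deferred work under the lock ... */
        rte_spinlock_unlock(&c->lock);
    }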
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index a3cef689..ffe1cdd6 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -134,18 +134,6 @@ mlx5_get_priv(struct rte_eth_dev *dev)
 }

 /**
- * Check if running as a secondary process.
- *
- * @return
- *   Nonzero if running as a secondary process.
- */
-inline int
-mlx5_is_secondary(void)
-{
-    return rte_eal_process_type() == RTE_PROC_SECONDARY;
-}
-
-/**
  * Get interface name from private structure.
  *
  * @param[in] priv
@@ -577,7 +565,7 @@ dev_configure(struct rte_eth_dev *dev)
     unsigned int j;
     unsigned int reta_idx_n;
     const uint8_t use_app_rss_key =
-        !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+        !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;

     if (use_app_rss_key &&
         (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
@@ -649,9 +637,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
     struct priv *priv = dev->data->dev_private;
     int ret;

-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
-
     priv_lock(priv);
     ret = dev_configure(dev);
     assert(ret >= 0);
@@ -720,6 +705,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
         priv->reta_idx_n : priv->ind_table_max_size;
     info->hash_key_size = priv->rss_conf.rss_key_len;
     info->speed_capa = priv->link_speed_capa;
+    info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
     priv_unlock(priv);
 }

@@ -913,25 +899,131 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 }

 /**
- * DPDK callback to retrieve physical link information.
+ * Enable receiving and transmitting traffic.
  *
- * @param dev
- *   Pointer to Ethernet device structure.
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_link_start(struct priv *priv)
+{
+    struct rte_eth_dev *dev = priv->dev;
+    int err;
+
+    priv_dev_select_tx_function(priv, dev);
+    priv_dev_select_rx_function(priv, dev);
+    err = priv_dev_traffic_enable(priv, dev);
+    if (err)
+        ERROR("%p: error occurred while configuring control flows: %s",
+              (void *)priv, strerror(err));
+    err = priv_flow_start(priv, &priv->flows);
+    if (err)
+        ERROR("%p: error occurred while configuring flows: %s",
+              (void *)priv, strerror(err));
+}
+
+/**
+ * Disable receiving and transmitting traffic.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_link_stop(struct priv *priv)
+{
+    struct rte_eth_dev *dev = priv->dev;
+
+    priv_flow_stop(priv, &priv->flows);
+    priv_dev_traffic_disable(priv, dev);
+    dev->rx_pkt_burst = removed_rx_burst;
+    dev->tx_pkt_burst = removed_tx_burst;
+}
+
+/**
+ * Retrieve physical link information and update rx/tx_pkt_burst callbacks
+ * accordingly.
+ *
+ * @param priv
+ *   Pointer to private structure.
  * @param wait_to_complete
  *   Wait for request completion (ignored).
  */
 int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+priv_link_update(struct priv *priv, int wait_to_complete)
 {
+    struct rte_eth_dev *dev = priv->dev;
     struct utsname utsname;
     int ver[3];
+    int ret;
+    struct rte_eth_link dev_link = dev->data->dev_link;

     if (uname(&utsname) == -1 ||
         sscanf(utsname.release, "%d.%d.%d",
            &ver[0], &ver[1], &ver[2]) != 3 ||
         KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
-        return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
-    return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+        ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+    else
+        ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+    /* If lsc interrupt is disabled, should always be ready for traffic. */
+    if (!dev->data->dev_conf.intr_conf.lsc) {
+        priv_link_start(priv);
+        return ret;
+    }
+    /* Re-select burst callbacks only if link status has been changed. */
+    if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
+        if (dev->data->dev_link.link_status == ETH_LINK_UP)
+            priv_link_start(priv);
+        else
+            priv_link_stop(priv);
+    }
+    return ret;
+}
+
+/**
+ * Querying the link status till it changes to the desired state.
+ * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param status
+ *   Link desired status.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+priv_force_link_status_change(struct priv *priv, int status)
+{
+    int try = 0;
+
+    while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
+        priv_link_update(priv, 0);
+        if (priv->dev->data->dev_link.link_status == status)
+            return 0;
+        try++;
+        sleep(1);
+    }
+    return -EAGAIN;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ *   Wait for request completion (ignored).
+ */
+int
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+    struct priv *priv = dev->data->dev_private;
+    int ret;
+
+    priv_lock(priv);
+    ret = priv_link_update(priv, wait_to_complete);
+    priv_unlock(priv);
+    return ret;
 }

 /**
@@ -952,9 +1044,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
     uint16_t kern_mtu;
     int ret = 0;

-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
-
     priv_lock(priv);
     ret = priv_get_mtu(priv, &kern_mtu);
     if (ret)
@@ -1002,9 +1091,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
     };
     int ret;

-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
-
     ifr.ifr_data = (void *)&ethpause;
     priv_lock(priv);
     if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
@@ -1053,9 +1139,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
     };
     int ret;

-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
-
     ifr.ifr_data = (void *)&ethpause;
     ethpause.autoneg = fc_conf->autoneg;
     if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
@@ -1150,7 +1233,7 @@ priv_link_status_update(struct priv *priv)
 {
     struct rte_eth_link *link = &priv->dev->data->dev_link;

-    mlx5_link_update(priv->dev, 0);
+    priv_link_update(priv, 0);
     if (((link->link_speed == 0) && link->link_status) ||
         ((link->link_speed != 0) && !link->link_status)) {
         /*
@@ -1224,8 +1307,12 @@ mlx5_dev_link_status_handler(void *arg)
     struct priv *priv = dev->data->dev_private;
     int ret;

-    priv_lock(priv);
-    assert(priv->pending_alarm == 1);
+    while (!priv_trylock(priv)) {
+        /* Alarm is being canceled. */
+        if (priv->pending_alarm == 0)
+            return;
+        rte_pause();
+    }
     priv->pending_alarm = 0;
     ret = priv_link_status_update(priv);
     priv_unlock(priv);
@@ -1295,9 +1382,10 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
     if (priv->primary_socket)
         rte_intr_callback_unregister(&priv->intr_handle_socket,
                          mlx5_dev_handler_socket, dev);
-    if (priv->pending_alarm)
+    if (priv->pending_alarm) {
+        priv->pending_alarm = 0;
         rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
-    priv->pending_alarm = 0;
+    }
     priv->intr_handle.fd = 0;
     priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
     priv->intr_handle_socket.fd = 0;
@@ -1317,7 +1405,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
 {
     int rc, flags;

-    assert(!mlx5_is_secondary());
     assert(priv->ctx->async_fd > 0);
     flags = fcntl(priv->ctx->async_fd, F_GETFL);
     rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
@@ -1348,8 +1435,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
  *
  * @param priv
  *   Pointer to private data structure.
- * @param dev
- *   Pointer to rte_eth_dev structure.
  * @param up
  *   Nonzero for link up, otherwise link down.
  *
@@ -1357,24 +1442,9 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
  *   0 on success, errno value on failure.
  */
 static int
-priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
+priv_dev_set_link(struct priv *priv, int up)
 {
-    int err;
-
-    if (up) {
-        err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
-        if (err)
-            return err;
-        priv_dev_select_tx_function(priv, dev);
-        priv_dev_select_rx_function(priv, dev);
-    } else {
-        err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
-        if (err)
-            return err;
-        dev->rx_pkt_burst = removed_rx_burst;
-        dev->tx_pkt_burst = removed_tx_burst;
-    }
-    return 0;
+    return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
 }

 /**
@@ -1393,7 +1463,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
     int err;

     priv_lock(priv);
-    err = priv_dev_set_link(priv, dev, 0);
+    err = priv_dev_set_link(priv, 0);
     priv_unlock(priv);
     return err;
 }
@@ -1414,7 +1484,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
     int err;

     priv_lock(priv);
-    err = priv_dev_set_link(priv, dev, 1);
+    err = priv_dev_set_link(priv, 1);
     priv_unlock(priv);
     return err;
 }
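With this change the burst callbacks only become functional once the link is reported up, so an application that used to transmit immediately after rte_eth_dev_start() may want a bounded wait of its own. A hedged application-side sketch; the retry count and delay are arbitrary choices mirroring the driver's 1-second cadence:

    #include <unistd.h>
    #include <rte_ethdev.h>

    /* Poll the port until it reports link up, or give up after 5 tries. */
    static int
    wait_for_link_up(uint16_t port_id)
    {
        struct rte_eth_link link;
        int i;

        for (i = 0; i < 5; i++) {
            rte_eth_link_get_nowait(port_id, &link);
            if (link.link_status == ETH_LINK_UP)
                return 0;
            sleep(1);
        }
        return -1;
    }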
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f32dfdd3..092644ff 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -50,6 +50,7 @@
 #include <rte_malloc.h>

 #include "mlx5.h"
+#include "mlx5_defs.h"
 #include "mlx5_prm.h"

 /* Define minimal priority for control plane flows. */
@@ -250,11 +251,8 @@ struct rte_flow {
     uint8_t rss_key[40]; /**< copy of the RSS key. */
     struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
     struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
-    union {
-        struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
-        /**< Flow with Rx queue. */
-        struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
-    };
+    struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
+    /**< Flow with Rx queue. */
 };

 /** Static initializer for items. */
@@ -444,20 +442,12 @@ struct mlx5_flow_parse {
     uint8_t rss_key[40]; /**< copy of the RSS key. */
     enum hash_rxq_type layer; /**< Last pattern layer detected. */
     struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
-    union {
-        struct {
-            struct ibv_flow_attr *ibv_attr;
-            /**< Pointer to Verbs attributes. */
-            unsigned int offset;
-            /**< Current position or total size of the attribute. */
-        } queue[RTE_DIM(hash_rxq_init)];
-        struct {
-            struct ibv_flow_attr *ibv_attr;
-            /**< Pointer to Verbs attributes. */
-            unsigned int offset;
-            /**< Current position or total size of the attribute. */
-        } drop_q;
-    };
+    struct {
+        struct ibv_flow_attr *ibv_attr;
+        /**< Pointer to Verbs attributes. */
+        unsigned int offset;
+        /**< Current position or total size of the attribute. */
+    } queue[RTE_DIM(hash_rxq_init)];
 };

 static const struct rte_flow_ops mlx5_flow_ops = {
@@ -537,7 +527,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
     }
     if (item->mask) {
         unsigned int i;
-        const uint8_t *spec = item->mask;
+        const uint8_t *spec = item->spec;

         for (i = 0; i < size; ++i)
             if ((spec[i] | mask[i]) != mask[i])
@@ -561,7 +551,8 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
 }

 /**
- * Copy the RSS configuration from the user ones.
+ * Copy the RSS configuration from the user ones; if rss_conf is null,
+ * use the driver one.
  *
  * @param priv
  *   Pointer to private structure.
@@ -578,15 +569,25 @@ priv_flow_convert_rss_conf(struct priv *priv,
                struct mlx5_flow_parse *parser,
                const struct rte_eth_rss_conf *rss_conf)
 {
-    const struct rte_eth_rss_conf *rss =
-        rss_conf ? rss_conf : &priv->rss_conf;
-
-    if (rss->rss_key_len > 40)
-        return EINVAL;
-    parser->rss_conf.rss_key_len = rss->rss_key_len;
-    parser->rss_conf.rss_hf = rss->rss_hf;
-    memcpy(parser->rss_key, rss->rss_key, rss->rss_key_len);
-    parser->rss_conf.rss_key = parser->rss_key;
+    /*
+     * This function is also called at the beginning of
+     * priv_flow_convert_actions() to initialize the parser with the
+     * device default RSS configuration.
+     */
+    (void)priv;
+    if (rss_conf) {
+        if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
+            return EINVAL;
+        if (rss_conf->rss_key_len != 40)
+            return EINVAL;
+        if (rss_conf->rss_key_len && rss_conf->rss_key) {
+            parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
+            memcpy(parser->rss_key, rss_conf->rss_key,
+                   rss_conf->rss_key_len);
+            parser->rss_conf.rss_key = parser->rss_key;
+        }
+        parser->rss_conf.rss_hf = rss_conf->rss_hf;
+    }
     return 0;
 }

@@ -827,12 +828,8 @@ priv_flow_convert_items_validate(struct priv *priv,

     (void)priv;
     /* Initialise the offsets to start after verbs attribute. */
-    if (parser->drop) {
-        parser->drop_q.offset = sizeof(struct ibv_flow_attr);
-    } else {
-        for (i = 0; i != hash_rxq_init_n; ++i)
-            parser->queue[i].offset = sizeof(struct ibv_flow_attr);
-    }
+    for (i = 0; i != hash_rxq_init_n; ++i)
+        parser->queue[i].offset = sizeof(struct ibv_flow_attr);
     for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
         const struct mlx5_flow_items *token = NULL;
         unsigned int n;
@@ -869,14 +866,16 @@ priv_flow_convert_items_validate(struct priv *priv,
             parser->inner = IBV_FLOW_SPEC_INNER;
         }
         if (parser->drop) {
-            parser->drop_q.offset += cur_item->dst_sz;
-        } else if (parser->queues_n == 1) {
             parser->queue[HASH_RXQ_ETH].offset +=
                 cur_item->dst_sz;
         } else {
             for (n = 0; n != hash_rxq_init_n; ++n)
                 parser->queue[n].offset += cur_item->dst_sz;
         }
     }
+    if (parser->drop) {
+        parser->queue[HASH_RXQ_ETH].offset +=
+            sizeof(struct ibv_flow_spec_action_drop);
+    }
     if (parser->mark) {
         for (i = 0; i != hash_rxq_init_n; ++i)
             parser->queue[i].offset +=
@@ -885,12 +884,8 @@ priv_flow_convert_items_validate(struct priv *priv,
     if (parser->count) {
         unsigned int size = sizeof(struct ibv_flow_spec_counter_action);

-        if (parser->drop) {
-            parser->drop_q.offset += size;
-        } else {
-            for (i = 0; i != hash_rxq_init_n; ++i)
-                parser->queue[i].offset += size;
-        }
+        for (i = 0; i != hash_rxq_init_n; ++i)
+            parser->queue[i].offset += size;
     }
     return 0;
 exit_item_not_supported:
@@ -1103,22 +1098,11 @@ priv_flow_convert(struct priv *priv,
      * Allocate the memory space to store verbs specifications.
      */
     if (parser->drop) {
-        parser->drop_q.ibv_attr =
-            priv_flow_convert_allocate(priv, attr->priority,
-                           parser->drop_q.offset,
-                           error);
-        if (!parser->drop_q.ibv_attr)
-            return ENOMEM;
-        parser->drop_q.offset = sizeof(struct ibv_flow_attr);
-    } else if (parser->queues_n == 1) {
-        unsigned int priority =
-            attr->priority +
-            hash_rxq_init[HASH_RXQ_ETH].flow_priority;
-        unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;
-
         parser->queue[HASH_RXQ_ETH].ibv_attr =
-            priv_flow_convert_allocate(priv, priority,
-                           offset, error);
+            priv_flow_convert_allocate
+            (priv, attr->priority,
+             parser->queue[HASH_RXQ_ETH].offset,
+             error);
         if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
             return ENOMEM;
         parser->queue[HASH_RXQ_ETH].offset =
@@ -1172,22 +1156,9 @@ priv_flow_convert(struct priv *priv,
      * Last step. Complete missing specification to reach the RSS
      * configuration.
      */
-    if (parser->drop) {
-        /*
-         * Drop queue priority needs to be adjusted to
-         * their most specific layer priority.
-         */
-        parser->drop_q.ibv_attr->priority =
-            attr->priority +
-            hash_rxq_init[parser->layer].flow_priority;
-    } else if (parser->queues_n > 1) {
+    if (!parser->drop) {
         priv_flow_convert_finalise(priv, parser);
     } else {
-        /*
-         * Action queue have their priority overridden with
-         * Ethernet priority, this priority needs to be adjusted to
-         * their most specific layer priority.
-         */
         parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
             attr->priority +
             hash_rxq_init[parser->layer].flow_priority;
@@ -1195,10 +1166,6 @@ priv_flow_convert(struct priv *priv,
 exit_free:
     /* Only verification is expected, all resources should be released. */
     if (!parser->create) {
-        if (parser->drop) {
-            rte_free(parser->drop_q.ibv_attr);
-            parser->drop_q.ibv_attr = NULL;
-        }
         for (i = 0; i != hash_rxq_init_n; ++i) {
             if (parser->queue[i].ibv_attr) {
                 rte_free(parser->queue[i].ibv_attr);
@@ -1240,14 +1207,6 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
     unsigned int i;
     void *dst;

-    if (parser->drop) {
-        dst = (void *)((uintptr_t)parser->drop_q.ibv_attr +
-                parser->drop_q.offset);
-        memcpy(dst, src, size);
-        ++parser->drop_q.ibv_attr->num_of_specs;
-        parser->drop_q.offset += size;
-        return;
-    }
     for (i = 0; i != hash_rxq_init_n; ++i) {
         if (!parser->queue[i].ibv_attr)
             continue;
@@ -1340,14 +1299,6 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,

     if (!mask)
         mask = default_mask;
-    if (parser->drop) {
-        eth = (void *)((uintptr_t)parser->drop_q.ibv_attr +
-                   parser->drop_q.offset - eth_size);
-        eth->val.vlan_tag = spec->tci;
-        eth->mask.vlan_tag = mask->tci;
-        eth->val.vlan_tag &= eth->mask.vlan_tag;
-        return 0;
-    }
     for (i = 0; i != hash_rxq_init_n; ++i) {
         if (!parser->queue[i].ibv_attr)
             continue;
@@ -1701,23 +1652,25 @@ priv_flow_create_action_queue_drop(struct priv *priv,
     assert(priv->pd);
     assert(priv->ctx);
     flow->drop = 1;
-    drop = (void *)((uintptr_t)parser->drop_q.ibv_attr +
-            parser->drop_q.offset);
+    drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr +
+            parser->queue[HASH_RXQ_ETH].offset);
     *drop = (struct ibv_flow_spec_action_drop){
             .type = IBV_FLOW_SPEC_ACTION_DROP,
             .size = size,
     };
-    ++parser->drop_q.ibv_attr->num_of_specs;
-    parser->drop_q.offset += size;
-    flow->drxq.ibv_attr = parser->drop_q.ibv_attr;
+    ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs;
+    parser->queue[HASH_RXQ_ETH].offset += size;
+    flow->frxq[HASH_RXQ_ETH].ibv_attr =
+        parser->queue[HASH_RXQ_ETH].ibv_attr;
     if (parser->count)
         flow->cs = parser->cs;
     if (!priv->dev->data->dev_started)
         return 0;
-    parser->drop_q.ibv_attr = NULL;
-    flow->drxq.ibv_flow = ibv_create_flow(priv->flow_drop_queue->qp,
-                          flow->drxq.ibv_attr);
-    if (!flow->drxq.ibv_flow) {
+    parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
+    flow->frxq[HASH_RXQ_ETH].ibv_flow =
+        ibv_create_flow(priv->flow_drop_queue->qp,
+                flow->frxq[HASH_RXQ_ETH].ibv_attr);
+    if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
         rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                    NULL, "flow rule creation failure");
         err = ENOMEM;
@@ -1726,13 +1679,13 @@ priv_flow_create_action_queue_drop(struct priv *priv,
     return 0;
 error:
     assert(flow);
-    if (flow->drxq.ibv_flow) {
-        claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
-        flow->drxq.ibv_flow = NULL;
+    if (flow->frxq[HASH_RXQ_ETH].ibv_flow) {
+        claim_zero(ibv_destroy_flow(flow->frxq[HASH_RXQ_ETH].ibv_flow));
+        flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
     }
-    if (flow->drxq.ibv_attr) {
-        rte_free(flow->drxq.ibv_attr);
-        flow->drxq.ibv_attr = NULL;
+    if (flow->frxq[HASH_RXQ_ETH].ibv_attr) {
+        rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
+        flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL;
     }
     if (flow->cs) {
         claim_zero(ibv_destroy_counter_set(flow->cs));
@@ -1947,13 +1900,9 @@ priv_flow_create(struct priv *priv,
     DEBUG("Flow created %p", (void *)flow);
     return flow;
 exit:
-    if (parser.drop) {
-        rte_free(parser.drop_q.ibv_attr);
-    } else {
-        for (i = 0; i != hash_rxq_init_n; ++i) {
-            if (parser.queue[i].ibv_attr)
-                rte_free(parser.queue[i].ibv_attr);
-        }
+    for (i = 0; i != hash_rxq_init_n; ++i) {
+        if (parser.queue[i].ibv_attr)
+            rte_free(parser.queue[i].ibv_attr);
     }
     rte_free(flow);
     return NULL;
@@ -2055,9 +2004,10 @@ priv_flow_destroy(struct priv *priv,
     }
 free:
     if (flow->drop) {
-        if (flow->drxq.ibv_flow)
-            claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
-        rte_free(flow->drxq.ibv_attr);
+        if (flow->frxq[HASH_RXQ_ETH].ibv_flow)
+            claim_zero(ibv_destroy_flow
+                   (flow->frxq[HASH_RXQ_ETH].ibv_flow));
+        rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
     } else {
         for (i = 0; i != hash_rxq_init_n; ++i) {
             struct mlx5_flow *frxq = &flow->frxq[i];
@@ -2224,23 +2174,34 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
     TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
         unsigned int i;
+        struct mlx5_ind_table_ibv *ind_tbl = NULL;

         if (flow->drop) {
-            if (!flow->drxq.ibv_flow)
+            if (!flow->frxq[HASH_RXQ_ETH].ibv_flow)
                 continue;
-            claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
-            flow->drxq.ibv_flow = NULL;
+            claim_zero(ibv_destroy_flow
+                   (flow->frxq[HASH_RXQ_ETH].ibv_flow));
+            flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
+            DEBUG("Flow %p removed", (void *)flow);
             /* Next flow. */
             continue;
         }
+        /* Verify the flow has not already been cleaned. */
+        for (i = 0; i != hash_rxq_init_n; ++i) {
+            if (!flow->frxq[i].ibv_flow)
+                continue;
+            /*
+             * Indirection table may be necessary to remove the
+             * flags in the Rx queues.
+             * This helps to speed-up the process by avoiding
+             * another loop.
+             */
+            ind_tbl = flow->frxq[i].hrxq->ind_table;
+            break;
+        }
+        if (i == hash_rxq_init_n)
+            return;
         if (flow->mark) {
-            struct mlx5_ind_table_ibv *ind_tbl = NULL;
-
-            for (i = 0; i != hash_rxq_init_n; ++i) {
-                if (!flow->frxq[i].hrxq)
-                    continue;
-                ind_tbl = flow->frxq[i].hrxq->ind_table;
-            }
             assert(ind_tbl);
             for (i = 0; i != ind_tbl->queues_n; ++i)
                 (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
@@ -2277,10 +2238,11 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
         unsigned int i;

         if (flow->drop) {
-            flow->drxq.ibv_flow =
-                ibv_create_flow(priv->flow_drop_queue->qp,
-                        flow->drxq.ibv_attr);
-            if (!flow->drxq.ibv_flow) {
+            flow->frxq[HASH_RXQ_ETH].ibv_flow =
+                ibv_create_flow
+                (priv->flow_drop_queue->qp,
+                 flow->frxq[HASH_RXQ_ETH].ibv_attr);
+            if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
                 DEBUG("Flow %p cannot be applied",
                       (void *)flow);
                 rte_errno = EINVAL;
@@ -2875,13 +2837,13 @@ priv_fdir_filter_delete(struct priv *priv,
     if (parser.drop) {
         struct ibv_flow_spec_action_drop *drop;

-        drop = (void *)((uintptr_t)parser.drop_q.ibv_attr +
-                parser.drop_q.offset);
+        drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr +
+                parser.queue[HASH_RXQ_ETH].offset);
         *drop = (struct ibv_flow_spec_action_drop){
             .type = IBV_FLOW_SPEC_ACTION_DROP,
             .size = sizeof(struct ibv_flow_spec_action_drop),
         };
-        parser.drop_q.ibv_attr->num_of_specs++;
+        parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++;
     }
     TAILQ_FOREACH(flow, &priv->flows, next) {
         struct ibv_flow_attr *attr;
@@ -2892,14 +2854,8 @@ priv_fdir_filter_delete(struct priv *priv,
         void *flow_spec;
         unsigned int specs_n;

-        if (parser.drop)
-            attr = parser.drop_q.ibv_attr;
-        else
-            attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
-        if (flow->drop)
-            flow_attr = flow->drxq.ibv_attr;
-        else
-            flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
+        attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
+        flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
         /* Compare first the attributes. */
         if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
             continue;
@@ -2929,13 +2885,9 @@ wrong_flow:
     if (flow)
         priv_flow_destroy(priv, &priv->flows, flow);
 exit:
-    if (parser.drop) {
-        rte_free(parser.drop_q.ibv_attr);
-    } else {
-        for (i = 0; i != hash_rxq_init_n; ++i) {
-            if (parser.queue[i].ibv_attr)
-                rte_free(parser.queue[i].ibv_attr);
-        }
+    for (i = 0; i != hash_rxq_init_n; ++i) {
+        if (parser.queue[i].ibv_attr)
+            rte_free(parser.queue[i].ibv_attr);
     }
     return -ret;
 }
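After this rework a drop rule is no longer a separate drop_q object: it is the regular HASH_RXQ_ETH attribute buffer with an ibv_flow_spec_action_drop appended at the current offset. A simplified sketch of that append step, with the buffer management stripped down and assuming the libibverbs types:

    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Append a drop action at the end of an already-sized attribute buffer. */
    static void
    append_drop_spec(struct ibv_flow_attr *attr, unsigned int *offset)
    {
        struct ibv_flow_spec_action_drop *drop =
            (void *)((uintptr_t)attr + *offset);

        *drop = (struct ibv_flow_spec_action_drop){
            .type = IBV_FLOW_SPEC_ACTION_DROP,
            .size = sizeof(*drop),
        };
        ++attr->num_of_specs;  /* the spec count lives in the header */
        *offset += drop->size; /* the next spec, if any, follows it */
    }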
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index d17b991e..9fb5ba5e 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -93,8 +93,6 @@ priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
 void
 mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 {
-    if (mlx5_is_secondary())
-        return;
     assert(index < MLX5_MAX_MAC_ADDRESSES);
     memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
     if (!dev->data->promiscuous && !dev->data->all_multicast)
@@ -124,8 +122,6 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
     int ret = 0;

     (void)vmdq;
-    if (mlx5_is_secondary())
-        return 0;
     assert(index < MLX5_MAX_MAC_ADDRESSES);
     /* First, make sure this address isn't already configured. */
     for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
@@ -154,8 +150,6 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
 void
 mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
-    if (mlx5_is_secondary())
-        return;
     DEBUG("%p: setting primary MAC address", (void *)dev);
     mlx5_mac_addr_add(dev, mac_addr, 0, 0);
 }
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 6b29eed5..2776dc70 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -291,6 +291,9 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
     DEBUG("mempool %p area start=%p end=%p size=%zu",
           (void *)mp, (void *)start, (void *)end,
           (size_t)(end - start));
+    /* Save original addresses for exact MR lookup. */
+    mr->start = start;
+    mr->end = end;
     /* Round start and end to page boundary if found in memory segments. */
     for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
         uintptr_t addr = (uintptr_t)ms[i].addr;
@@ -309,8 +312,6 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
                IBV_ACCESS_LOCAL_WRITE);
     mr->mp = mp;
     mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
-    mr->start = start;
-    mr->end = (uintptr_t)mr->mr->addr + mr->mr->length;
     rte_atomic32_inc(&mr->refcnt);
     DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
           (void *)mr, rte_atomic32_read(&mr->refcnt));
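The mlx5_mr.c hunk records the mempool's exact [start, end) range before the addresses are rounded to page boundaries for ibv_reg_mr(); lookups then match on the exact range, so two mempools sharing a page cannot satisfy each other's lookups. A small sketch of the half-open containment test this enables; the struct is illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    struct mr_range {
        uintptr_t start; /* exact mempool start, saved before rounding */
        uintptr_t end;   /* exact mempool end (one past the last byte) */
    };

    /* Half-open check: 'end' itself belongs to the next region. */
    static bool
    mr_contains(const struct mr_range *mr, uintptr_t addr)
    {
        return mr->start <= addr && addr < mr->end;
    }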
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index f3de46de..f47bda66 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -51,6 +51,7 @@
 #include <rte_ethdev.h>

 #include "mlx5.h"
+#include "mlx5_defs.h"
 #include "mlx5_rxtx.h"

 /**
@@ -72,6 +73,10 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
     int ret = 0;

     priv_lock(priv);
+    if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
+        ret = -EINVAL;
+        goto out;
+    }
     if (rss_conf->rss_key && rss_conf->rss_key_len) {
         priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
                              rss_conf->rss_key_len, 0);
@@ -274,7 +279,6 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
     int ret;
     struct priv *priv = dev->data->dev_private;

-    assert(!mlx5_is_secondary());
     priv_lock(priv);
     ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
     priv_unlock(priv);
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 0ef2cdf0..6fb245ba 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -60,8 +60,6 @@ void
 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 {
-    if (mlx5_is_secondary())
-        return;
     dev->data->promiscuous = 1;
     mlx5_traffic_restart(dev);
 }
@@ -75,8 +73,6 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 void
 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 {
-    if (mlx5_is_secondary())
-        return;
     dev->data->promiscuous = 0;
     mlx5_traffic_restart(dev);
 }
@@ -90,8 +86,6 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 void
 mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 {
-    if (mlx5_is_secondary())
-        return;
     dev->data->all_multicast = 1;
     mlx5_traffic_restart(dev);
 }
@@ -105,8 +99,6 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 void
 mlx5_allmulticast_disable(struct rte_eth_dev *dev)
 {
-    if (mlx5_is_secondary())
-        return;
     dev->data->all_multicast = 0;
     mlx5_traffic_restart(dev);
 }
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 85399eff..20f3ec6c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -242,8 +242,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
     int ret = 0;

     (void)conf;
-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
     priv_lock(priv);
     if (!rte_is_power_of_2(desc)) {
         desc = 1 << log2above(desc);
@@ -294,9 +292,6 @@ mlx5_rx_queue_release(void *dpdk_rxq)
     struct mlx5_rxq_ctrl *rxq_ctrl;
     struct priv *priv;

-    if (mlx5_is_secondary())
-        return;
-
     if (rxq == NULL)
         return;
     rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -327,7 +322,6 @@ priv_rx_intr_vec_enable(struct priv *priv)
     unsigned int count = 0;
     struct rte_intr_handle *intr_handle = priv->dev->intr_handle;

-    assert(!mlx5_is_secondary());
     if (!priv->dev->data->dev_conf.intr_conf.rxq)
         return 0;
     priv_rx_intr_vec_disable(priv);
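MLX5_RSS_HF_MASK inverts the supported ETH_RSS_IP/UDP/TCP bits, so a single AND flags any hash field the hardware cannot provide. A sketch of the validation idiom in isolation, with the mask open-coded under a hypothetical name:

    #include <errno.h>
    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Bits outside IP/UDP/TCP RSS are unsupported in this sketch. */
    #define EXAMPLE_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))

    static int
    validate_rss_hf(uint64_t rss_hf)
    {
        if (rss_hf & EXAMPLE_RSS_HF_MASK)
            return -EINVAL; /* caller asked for an unsupported field */
        return 0;
    }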
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2d30c507..32bfa307 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -374,7 +374,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
         uint16_t tso_header_sz = 0;
         uint16_t ehdr;
-        uint8_t cs_flags = 0;
+        uint8_t cs_flags;
         uint64_t tso = 0;
         uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -417,23 +417,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         if (pkts_n - i > 1)
             rte_prefetch0(
                 rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
-        /* Should we enable HW CKSUM offload */
-        if (buf->ol_flags &
-            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-            const uint64_t is_tunneled = buf->ol_flags &
-                             (PKT_TX_TUNNEL_GRE |
-                              PKT_TX_TUNNEL_VXLAN);
-
-            if (is_tunneled && txq->tunnel_en) {
-                cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-                       MLX5_ETH_WQE_L4_INNER_CSUM;
-                if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                    cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-            } else {
-                cs_flags = MLX5_ETH_WQE_L3_CSUM |
-                       MLX5_ETH_WQE_L4_CSUM;
-            }
-        }
+        cs_flags = txq_ol_cksum_to_cs(txq, buf);
         raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
         /* Replace the Ethernet type by the VLAN if necessary. */
         if (buf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -847,7 +831,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         struct rte_mbuf *buf = *(pkts++);
         uint32_t length;
         unsigned int segs_n = buf->nb_segs;
-        uint32_t cs_flags = 0;
+        uint32_t cs_flags;

         /*
          * Make sure there is enough room to store this packet and
@@ -863,10 +847,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         }
         max_elts -= segs_n;
         --pkts_n;
-        /* Should we enable HW CKSUM offload */
-        if (buf->ol_flags &
-            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-            cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+        cs_flags = txq_ol_cksum_to_cs(txq, buf);
         /* Retrieve packet information. */
         length = PKT_LEN(buf);
         assert(length);
@@ -1072,7 +1053,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
         uintptr_t addr;
         uint32_t length;
         unsigned int segs_n = buf->nb_segs;
-        uint32_t cs_flags = 0;
+        uint8_t cs_flags;

         /*
          * Make sure there is enough room to store this packet and
@@ -1093,10 +1074,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
          * iteration.
          */
         max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-        /* Should we enable HW CKSUM offload */
-        if (buf->ol_flags &
-            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-            cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+        cs_flags = txq_ol_cksum_to_cs(txq, buf);
         /* Retrieve packet information. */
         length = PKT_LEN(buf);
         /* Start new session if packet differs. */
@@ -1366,7 +1344,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         unsigned int do_inline = 0; /* Whether inline is possible. */
         uint32_t length;
         unsigned int segs_n = buf->nb_segs;
-        uint32_t cs_flags = 0;
+        uint8_t cs_flags;

         /*
          * Make sure there is enough room to store this packet and
@@ -1380,10 +1358,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
             txq->stats.oerrors++;
             break;
         }
-        /* Should we enable HW CKSUM offload. */
-        if (buf->ol_flags &
-            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-            cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+        cs_flags = txq_ol_cksum_to_cs(txq, buf);
         /* Retrieve packet information. */
         length = PKT_LEN(buf);
         /* Start new session if:
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d34f3cc0..de5b769e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -114,8 +114,7 @@ struct mlx5_rxq_data {
     unsigned int elts_n:4; /* Log 2 of Mbufs. */
     unsigned int rss_hash:1; /* RSS hash result is enabled. */
     unsigned int mark:1; /* Marked flow available on the queue. */
-    unsigned int pending_err:1; /* CQE error needs to be handled. */
-    unsigned int :14; /* Remaining bits. */
+    unsigned int :15; /* Remaining bits. */
     volatile uint32_t *rq_db;
     volatile uint32_t *cq_db;
     uint16_t port_id;
@@ -548,15 +547,16 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
     struct mlx5_mr *mr;

     assert(i < RTE_DIM(txq->mp2mr));
-    if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end >= addr))
+    if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr))
         return txq->mp2mr[i]->lkey;
     for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-        if (unlikely(txq->mp2mr[i]->mr == NULL)) {
+        if (unlikely(txq->mp2mr[i] == NULL ||
+            txq->mp2mr[i]->mr == NULL)) {
             /* Unknown MP, add a new MR for it. */
             break;
         }
         if (txq->mp2mr[i]->start <= addr &&
-            txq->mp2mr[i]->end >= addr) {
+            txq->mp2mr[i]->end > addr) {
             assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
             assert(rte_cpu_to_be_32(txq->mp2mr[i]->mr->lkey) ==
                    txq->mp2mr[i]->lkey);
@@ -564,7 +564,6 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
             return txq->mp2mr[i]->lkey;
         }
     }
-    txq->mr_cache_idx = 0;
     mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
     /*
      * Request the reference to use in this queue, the original one is
@@ -572,6 +571,7 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
      */
     if (mr) {
         rte_atomic32_inc(&mr->refcnt);
+        txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
         return mr->lkey;
     }
     return (uint32_t)-1;
@@ -617,4 +617,39 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
     mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
 }

+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param txq_data
+ *   Pointer to the Tx queue.
+ * @param buf
+ *   Pointer to the mbuf.
+ *
+ * @return
+ *   the converted cs_flags.
+ */
+static __rte_always_inline uint8_t
+txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+{
+    uint8_t cs_flags = 0;
+
+    /* Should we enable HW CKSUM offload */
+    if (buf->ol_flags &
+        (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
+         PKT_TX_OUTER_IP_CKSUM)) {
+        if (txq_data->tunnel_en &&
+            (buf->ol_flags &
+             (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
+            cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+                   MLX5_ETH_WQE_L4_INNER_CSUM;
+            if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+                cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+        } else {
+            cs_flags = MLX5_ETH_WQE_L3_CSUM |
+                   MLX5_ETH_WQE_L4_CSUM;
+        }
+    }
+    return cs_flags;
+}
+
 #endif /* RTE_PMD_MLX5_RXTX_H_ */
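Five open-coded copies of the checksum translation collapse into txq_ol_cksum_to_cs(), and the guarding condition now also tests PKT_TX_OUTER_IP_CKSUM, so tunnel packets requesting only an outer checksum are no longer skipped. The typical call-site shape after the change, as a schematic sketch rather than the driver's exact burst code:

    /* Per-packet use inside a Tx burst loop (schematic). */
    static uint16_t
    example_tx_burst(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
             uint16_t pkts_n)
    {
        uint16_t i;

        for (i = 0; i < pkts_n; ++i) {
            uint8_t cs_flags = txq_ol_cksum_to_cs(txq, pkts[i]);

            /* ... build the WQE, storing cs_flags in its checksum
             * control byte ... */
            (void)cs_flags;
        }
        return i;
    }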
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index ba6c8cef..101aa156 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -123,24 +123,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
     for (pos = 1; pos < pkts_n; ++pos)
         if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
             break;
-    /* Should open another MPW session for the rest. */
-    if (pkts[0]->ol_flags &
-        (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-        const uint64_t is_tunneled =
-            pkts[0]->ol_flags &
-            (PKT_TX_TUNNEL_GRE |
-             PKT_TX_TUNNEL_VXLAN);
-
-        if (is_tunneled && txq->tunnel_en) {
-            *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-                    MLX5_ETH_WQE_L4_INNER_CSUM;
-            if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-        } else {
-            *cs_flags = MLX5_ETH_WQE_L3_CSUM |
-                    MLX5_ETH_WQE_L4_CSUM;
-        }
-    }
+    *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
     return pos;
 }

@@ -261,7 +244,6 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
     rxq->stats.ipackets -= (pkts_n - n);
     rxq->stats.ibytes -= err_bytes;
 #endif
-    rxq->pending_err = 0;
     return n;
 }

@@ -283,9 +265,10 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
     struct mlx5_rxq_data *rxq = dpdk_rxq;
     uint16_t nb_rx;
+    uint64_t err = 0;

-    nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
-    if (unlikely(rxq->pending_err))
+    nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
+    if (unlikely(err))
         nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
     return nb_rx;
 }
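Replacing the rxq->pending_err bitfield with a caller-owned accumulator keeps the hot path from writing to the shared queue structure and lets the wrapper decide when to take the slow error path. Because the vector loops OR raw compare-lane masks into the flag, any non-zero value means "at least one bad completion"; no !!-normalization is needed per iteration. A condensed sketch of the calling pattern under that assumption:

    /* Wrapper owns the error flag; the vector loop only ORs into it. */
    static uint16_t
    example_rx_burst(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
             uint16_t pkts_n)
    {
        uint64_t err = 0;
        uint16_t nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);

        if (unlikely(err)) /* any set bit anywhere is enough */
            nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
        return nb_rx;
    }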
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index c721d80e..06f83ef1 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -149,7 +149,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
         11, 10,  9,  8, /* bswap32 */
         12, 13, 14, 15
     };
-    uint8_t cs_flags = 0;
+    uint8_t cs_flags;
     uint16_t max_elts;
     uint16_t max_wqe;
     uint8x16_t *t_wqe;
@@ -168,22 +168,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
             break;
         wqe = &((volatile struct mlx5_wqe64 *)
             txq->wqes)[wqe_ci & wq_mask].hdr;
-        if (buf->ol_flags &
-            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-            const uint64_t is_tunneled =
-                buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-                         PKT_TX_TUNNEL_VXLAN);
-
-            if (is_tunneled && txq->tunnel_en) {
-                cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-                       MLX5_ETH_WQE_L4_INNER_CSUM;
-                if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                    cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-            } else {
-                cs_flags = MLX5_ETH_WQE_L3_CSUM |
-                       MLX5_ETH_WQE_L4_CSUM;
-            }
-        }
+        cs_flags = txq_ol_cksum_to_cs(txq, buf);
         /* Title WQEBB pointer. */
         t_wqe = (uint8x16_t *)wqe;
         dseg = (uint8_t *)(wqe + 1);
@@ -590,11 +575,15 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
     if (rxq->mark) {
         const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
         const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
-        const uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+        uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+        uint32x4_t invalid_mask;

         /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
-        ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_flags,
-                             vceqzq_u32(flow_tag)));
+        invalid_mask = vceqzq_u32(flow_tag);
+        ol_flags = vorrq_u32(ol_flags,
+                     vbicq_u32(fdir_flags, invalid_mask));
+        /* Mask out invalid entries. */
+        fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
         /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
         ol_flags = vorrq_u32(ol_flags,
                      vbicq_u32(fdir_id_flags,
@@ -665,12 +654,16 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
  *   Array to store received packets.
  * @param pkts_n
  *   Maximum number of packets in array.
+ * @param[out] err
+ *   Pointer to a flag. Set non-zero value if pkts array has at least one error
+ *   packet to handle.
  *
  * @return
  *   Number of packets received including errors (<= pkts_n).
  */
 static inline uint16_t
-rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
+        uint64_t *err)
 {
     const uint16_t q_n = 1 << rxq->cqe_n;
     const uint16_t q_mask = q_n - 1;
@@ -970,8 +963,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
         opcode = vceq_u16(resp_err_check, opcode);
         opcode = vbic_u16(opcode, invalid_mask);
         /* D.4 mark if any error is set */
-        rxq->pending_err |=
-            !!vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
+        *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
         /* C.4 fill in mbuf - rearm_data and packet_type. */
         rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
                      opcode, &elts[pos]);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 2b9f1601..7ef2c59e 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -148,7 +148,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
          8,  9, 10, 11, /* bswap32 */
          4,  5,  6,  7, /* bswap32 */
          0,  1,  2,  3  /* bswap32 */);
-    uint8_t cs_flags = 0;
+    uint8_t cs_flags;
     uint16_t max_elts;
     uint16_t max_wqe;
     __m128i *t_wqe, *dseg;
@@ -170,22 +170,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
         }
         wqe = &((volatile struct mlx5_wqe64 *)
             txq->wqes)[wqe_ci & wq_mask].hdr;
-        if (buf->ol_flags &
-            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-            const uint64_t is_tunneled =
-                buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-                         PKT_TX_TUNNEL_VXLAN);
-
-            if (is_tunneled && txq->tunnel_en) {
-                cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-                       MLX5_ETH_WQE_L4_INNER_CSUM;
-                if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                    cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-            } else {
-                cs_flags = MLX5_ETH_WQE_L3_CSUM |
-                       MLX5_ETH_WQE_L4_CSUM;
-            }
-        }
+        cs_flags = txq_ol_cksum_to_cs(txq, buf);
         /* Title WQEBB pointer. */
         t_wqe = (__m128i *)wqe;
         dseg = (__m128i *)(wqe + 1);
@@ -591,7 +576,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
             _mm_set_epi32(0xffffff00, 0xffffff00,
                       0xffffff00, 0xffffff00);
         const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
-        const __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
+        __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
         __m128i flow_tag, invalid_mask;

         flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
@@ -601,7 +586,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
                 _mm_andnot_si128(invalid_mask, fdir_flags));
         /* Mask out invalid entries. */
-        flow_tag = _mm_andnot_si128(invalid_mask, flow_tag);
+        fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
         /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
         ol_flags = _mm_or_si128(ol_flags,
                     _mm_andnot_si128(
@@ -669,12 +654,16 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
  *   Array to store received packets.
  * @param pkts_n
  *   Maximum number of packets in array.
+ * @param[out] err
+ *   Pointer to a flag. Set non-zero value if pkts array has at least one error
+ *   packet to handle.
  *
  * @return
  *   Number of packets received including errors (<= pkts_n).
  */
 static inline uint16_t
-rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
+        uint64_t *err)
 {
     const uint16_t q_n = 1 << rxq->cqe_n;
     const uint16_t q_mask = q_n - 1;
@@ -936,7 +925,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
         opcode = _mm_packs_epi32(opcode, zero);
         opcode = _mm_andnot_si128(invalid_mask, opcode);
         /* D.4 mark if any error is set */
-        rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
+        *err |= _mm_cvtsi128_si64(opcode);
         /* D.5 fill in mbuf - rearm_data and packet_type. */
         rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
         if (rxq->hw_timestamp) {
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 5e225d37..2427585f 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -143,11 +143,9 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats)
     struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
     unsigned int i;
     struct ifreq ifr;
-    unsigned int stats_sz = (xstats_ctrl->stats_n * sizeof(uint64_t)) +
-                sizeof(struct ethtool_stats);
-    struct ethtool_stats et_stats[(stats_sz + (
-                      sizeof(struct ethtool_stats) - 1)) /
-                      sizeof(struct ethtool_stats)];
+    unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
+    unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
+    struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;

     et_stats->cmd = ETHTOOL_GSTATS;
     et_stats->n_stats = xstats_ctrl->stats_n;
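The old mlx5_stats.c declaration allocated an array *of* struct ethtool_stats just to obtain enough bytes, which over-allocates and obscures intent; the new form sizes a raw byte buffer as header plus payload. The idiom in isolation, assuming the Linux ethtool UAPI header:

    #include <stdint.h>
    #include <string.h>
    #include <linux/ethtool.h>

    /* Variable-length ethtool request: header followed by n_stats u64s. */
    static void
    prepare_gstats(unsigned int n_stats)
    {
        unsigned int payload = n_stats * sizeof(uint64_t);
        unsigned char buf[sizeof(struct ethtool_stats) + payload];
        struct ethtool_stats *et = (struct ethtool_stats *)buf;

        memset(buf, 0, sizeof(buf));
        et->cmd = ETHTOOL_GSTATS;
        et->n_stats = n_stats;
        /* ... hand 'et' to SIOCETHTOOL through struct ifreq ... */
    }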
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 5de2d026..d682ea2c 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -64,8 +64,11 @@ priv_txq_start(struct priv *priv)

         if (!txq_ctrl)
             continue;
-        LIST_FOREACH(mr, &priv->mr, next)
+        LIST_FOREACH(mr, &priv->mr, next) {
             priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
+            if (idx == MLX5_PMD_TX_MP_CACHE)
+                break;
+        }
         txq_alloc_elts(txq_ctrl);
         txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
         if (!txq_ctrl->ibv) {
@@ -132,9 +135,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
     struct mlx5_mr *mr = NULL;
     int err;

-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
-
     dev->data->dev_started = 1;
     priv_lock(priv);
     err = priv_flow_create_drop_queue(priv);
@@ -151,38 +151,29 @@ mlx5_dev_start(struct rte_eth_dev *dev)
               (void *)dev, strerror(err));
         goto error;
     }
-    /* Update send callback. */
-    priv_dev_select_tx_function(priv, dev);
     err = priv_rxq_start(priv);
     if (err) {
         ERROR("%p: RXQ allocation failed: %s",
               (void *)dev, strerror(err));
         goto error;
     }
-    /* Update receive callback. */
-    priv_dev_select_rx_function(priv, dev);
-    err = priv_dev_traffic_enable(priv, dev);
-    if (err) {
-        ERROR("%p: an error occurred while configuring control flows:"
-              " %s",
-              (void *)priv, strerror(err));
-        goto error;
-    }
-    err = priv_flow_start(priv, &priv->flows);
-    if (err) {
-        ERROR("%p: an error occurred while configuring flows:"
-              " %s",
-              (void *)priv, strerror(err));
-        goto error;
-    }
     err = priv_rx_intr_vec_enable(priv);
     if (err) {
         ERROR("%p: RX interrupt vector creation failed",
               (void *)priv);
         goto error;
     }
-    priv_dev_interrupt_handler_install(priv, dev);
     priv_xstats_init(priv);
+    /* Update link status and Tx/Rx callbacks for the first time. */
+    memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+    INFO("Forcing port %u link to be up", dev->data->port_id);
+    err = priv_force_link_status_change(priv, ETH_LINK_UP);
+    if (err) {
+        DEBUG("Failed to set port %u link to be up",
+              dev->data->port_id);
+        goto error;
+    }
+    priv_dev_interrupt_handler_install(priv, dev);
     priv_unlock(priv);
     return 0;
 error:
@@ -196,7 +187,7 @@ error:
     priv_rxq_stop(priv);
     priv_flow_delete_drop_queue(priv);
     priv_unlock(priv);
-    return -err;
+    return err;
 }

 /**
@@ -213,9 +204,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
     struct priv *priv = dev->data->dev_private;
     struct mlx5_mr *mr;

-    if (mlx5_is_secondary())
-        return;
-
     priv_lock(priv);
     dev->data->dev_started = 0;
     /* Prevent crashes when queues are still in use. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9c5860ff..7ca99f5a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -142,9 +142,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         container_of(txq, struct mlx5_txq_ctrl, txq);
     int ret = 0;

-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
-
     priv_lock(priv);
     if (desc <= MLX5_TX_COMP_THRESH) {
         WARN("%p: number of descriptors requested for TX queue %u"
@@ -203,9 +200,6 @@ mlx5_tx_queue_release(void *dpdk_txq)
     struct priv *priv;
     unsigned int i;

-    if (mlx5_is_secondary())
-        return;
-
     if (txq == NULL)
         return;
     txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
@@ -253,6 +247,8 @@ priv_tx_uar_remap(struct priv *priv, int fd)
      * Ref to libmlx5 function: mlx5_init_context()
      */
     for (i = 0; i != priv->txqs_n; ++i) {
+        if (!(*priv->txqs)[i])
+            continue;
         txq = (*priv->txqs)[i];
         txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
         uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 6fc315ef..198a69e3 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -127,6 +127,11 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
     DEBUG("set VLAN offloads 0x%x for port %d queue %d",
           vlan_offloads, rxq->port_id, idx);
+    if (!rxq_ctrl->ibv) {
+        /* Update related bits in RX queue. */
+        rxq->vlan_strip = !!on;
+        return;
+    }
     mod = (struct ibv_wq_attr){
         .attr_mask = IBV_WQ_ATTR_FLAGS,
         .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
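The mlx5_vlan.c hunk handles configuration that arrives before the Verbs WQ exists, which is now possible since queues are created at device start rather than at queue setup: the request is remembered in the queue state and applied once the WQ is created. The general shape of such deferred configuration, as a sketch with hypothetical fields:

    #include <stdbool.h>
    #include <stddef.h>

    struct rxq_state {
        bool vlan_strip; /* desired setting, always tracked */
        void *hw_queue;  /* NULL until the device object exists */
    };

    static void
    set_vlan_strip(struct rxq_state *q, bool on)
    {
        q->vlan_strip = on; /* remember the request */
        if (q->hw_queue == NULL)
            return;         /* applied later, at queue creation */
        /* ... otherwise push the flag to hardware immediately ... */
    }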