From 7595afa4d30097c1177b69257118d8ad89a539be Mon Sep 17 00:00:00 2001 From: Christian Ehrhardt Date: Tue, 16 May 2017 14:51:32 +0200 Subject: Imported Upstream version 17.05 Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6 Signed-off-by: Christian Ehrhardt --- lib/librte_ether/Makefile | 12 +- lib/librte_ether/rte_eth_ctrl.h | 1 + lib/librte_ether/rte_ethdev.c | 769 +++++++++++--------- lib/librte_ether/rte_ethdev.h | 1036 ++++++++++++++++----------- lib/librte_ether/rte_ethdev_pci.h | 193 +++++ lib/librte_ether/rte_ethdev_vdev.h | 84 +++ lib/librte_ether/rte_ether_version.map | 28 +- lib/librte_ether/rte_flow.c | 159 +++++ lib/librte_ether/rte_flow.h | 1198 ++++++++++++++++++++++++++++++++ lib/librte_ether/rte_flow_driver.h | 182 +++++ 10 files changed, 2926 insertions(+), 736 deletions(-) create mode 100644 lib/librte_ether/rte_ethdev_pci.h create mode 100644 lib/librte_ether/rte_ethdev_vdev.h create mode 100644 lib/librte_ether/rte_flow.c create mode 100644 lib/librte_ether/rte_flow.h create mode 100644 lib/librte_ether/rte_flow_driver.h diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile index efe1e5fe..93fdde10 100644 --- a/lib/librte_ether/Makefile +++ b/lib/librte_ether/Makefile @@ -1,6 +1,6 @@ # BSD LICENSE # -# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# Copyright(c) 2010-2016 Intel Corporation. All rights reserved. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -41,18 +41,20 @@ CFLAGS += $(WERROR_FLAGS) EXPORT_MAP := rte_ether_version.map -LIBABIVER := 5 +LIBABIVER := 6 SRCS-y += rte_ethdev.c +SRCS-y += rte_flow.c # # Export include files # SYMLINK-y-include += rte_ethdev.h +SYMLINK-y-include += rte_ethdev_pci.h +SYMLINK-y-include += rte_ethdev_vdev.h SYMLINK-y-include += rte_eth_ctrl.h SYMLINK-y-include += rte_dev_info.h - -# this lib depends upon: -DEPDIRS-y += lib/librte_net lib/librte_eal lib/librte_mempool lib/librte_ring lib/librte_mbuf +SYMLINK-y-include += rte_flow.h +SYMLINK-y-include += rte_flow_driver.h include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h index fe80eb01..83869042 100644 --- a/lib/librte_ether/rte_eth_ctrl.h +++ b/lib/librte_ether/rte_eth_ctrl.h @@ -99,6 +99,7 @@ enum rte_filter_type { RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_HASH, RTE_ETH_FILTER_L2_TUNNEL, + RTE_ETH_FILTER_GENERIC, RTE_ETH_FILTER_MAX }; diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index 5a317594..83898a8f 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -138,10 +138,18 @@ enum { STAT_QMAP_RX }; -enum { - DEV_DETACHED = 0, - DEV_ATTACHED -}; +uint8_t +rte_eth_find_next(uint8_t port_id) +{ + while (port_id < RTE_MAX_ETHPORTS && + rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED) + port_id++; + + if (port_id >= RTE_MAX_ETHPORTS) + return RTE_MAX_ETHPORTS; + + return port_id; +} static void rte_eth_dev_data_alloc(void) @@ -170,7 +178,7 @@ rte_eth_dev_allocated(const char *name) unsigned i; for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if ((rte_eth_devices[i].attached == DEV_ATTACHED) && + if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) && strcmp(rte_eth_devices[i].data->name, name) == 0) return &rte_eth_devices[i]; } @@ -183,7 +191,7 @@ rte_eth_dev_find_free_port(void) unsigned i; for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (rte_eth_devices[i].attached == DEV_DETACHED) + if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED) return i; } return 
RTE_MAX_ETHPORTS; @@ -195,7 +203,8 @@ eth_dev_get(uint8_t port_id) struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; eth_dev->data = &rte_eth_dev_data[port_id]; - eth_dev->attached = DEV_ATTACHED; + eth_dev->state = RTE_ETH_DEV_ATTACHED; + TAILQ_INIT(&(eth_dev->link_intr_cbs)); eth_dev_last_created_port = port_id; nb_ports++; @@ -224,9 +233,11 @@ rte_eth_dev_allocate(const char *name) return NULL; } + memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data)); eth_dev = eth_dev_get(port_id); snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name); eth_dev->data->port_id = port_id; + eth_dev->data->mtu = ETHER_MTU; return eth_dev; } @@ -236,8 +247,8 @@ rte_eth_dev_allocate(const char *name) * makes sure that the same device would have the same port id both * in the primary and secondary process. */ -static struct rte_eth_dev * -eth_dev_attach_secondary(const char *name) +struct rte_eth_dev * +rte_eth_dev_attach_secondary(const char *name) { uint8_t i; struct rte_eth_dev *eth_dev; @@ -268,121 +279,16 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) if (eth_dev == NULL) return -EINVAL; - eth_dev->attached = DEV_DETACHED; + eth_dev->state = RTE_ETH_DEV_UNUSED; nb_ports--; return 0; } -int -rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv, - struct rte_pci_device *pci_dev) -{ - struct eth_driver *eth_drv; - struct rte_eth_dev *eth_dev; - char ethdev_name[RTE_ETH_NAME_MAX_LEN]; - - int diag; - - eth_drv = (struct eth_driver *)pci_drv; - - rte_eal_pci_device_name(&pci_dev->addr, ethdev_name, - sizeof(ethdev_name)); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - eth_dev = rte_eth_dev_allocate(ethdev_name); - if (eth_dev == NULL) - return -ENOMEM; - - eth_dev->data->dev_private = rte_zmalloc("ethdev private structure", - eth_drv->dev_private_size, - RTE_CACHE_LINE_SIZE); - if (eth_dev->data->dev_private == NULL) - rte_panic("Cannot allocate memzone for private port data\n"); - } else { - eth_dev = eth_dev_attach_secondary(ethdev_name); - if (eth_dev == NULL) { - /* - * if we failed to attach a device, it means the - * device is skipped in primary process, due to - * some errors. If so, we return a positive value, - * to let EAL skip it for the secondary process - * as well. - */ - return 1; - } - } - eth_dev->pci_dev = pci_dev; - eth_dev->driver = eth_drv; - eth_dev->data->rx_mbuf_alloc_failed = 0; - - /* init user callbacks */ - TAILQ_INIT(&(eth_dev->link_intr_cbs)); - - /* - * Set the default MTU. 
- */ - eth_dev->data->mtu = ETHER_MTU; - - /* Invoke PMD device initialization function */ - diag = (*eth_drv->eth_dev_init)(eth_dev); - if (diag == 0) - return 0; - - RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n", - pci_drv->driver.name, - (unsigned) pci_dev->id.vendor_id, - (unsigned) pci_dev->id.device_id); - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); - rte_eth_dev_release_port(eth_dev); - return diag; -} - -int -rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev) -{ - const struct eth_driver *eth_drv; - struct rte_eth_dev *eth_dev; - char ethdev_name[RTE_ETH_NAME_MAX_LEN]; - int ret; - - if (pci_dev == NULL) - return -EINVAL; - - rte_eal_pci_device_name(&pci_dev->addr, ethdev_name, - sizeof(ethdev_name)); - - eth_dev = rte_eth_dev_allocated(ethdev_name); - if (eth_dev == NULL) - return -ENODEV; - - eth_drv = (const struct eth_driver *)pci_dev->driver; - - /* Invoke PMD device uninit function */ - if (*eth_drv->eth_dev_uninit) { - ret = (*eth_drv->eth_dev_uninit)(eth_dev); - if (ret) - return ret; - } - - /* free ether device */ - rte_eth_dev_release_port(eth_dev); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); - - eth_dev->pci_dev = NULL; - eth_dev->driver = NULL; - eth_dev->data = NULL; - - return 0; -} - int rte_eth_dev_is_valid_port(uint8_t port_id) { if (port_id >= RTE_MAX_ETHPORTS || - rte_eth_devices[port_id].attached != DEV_ATTACHED) + rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED) return 0; else return 1; @@ -434,9 +340,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id) return -ENODEV; *port_id = RTE_MAX_ETHPORTS; - - for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - + RTE_ETH_FOREACH_DEV(i) { if (!strncmp(name, rte_eth_dev_data[i].name, strlen(name))) { @@ -460,8 +364,8 @@ rte_eth_dev_is_detachable(uint8_t port_id) case RTE_KDRV_UIO_GENERIC: case RTE_KDRV_NIC_UIO: case RTE_KDRV_NONE: - break; case RTE_KDRV_VFIO: + break; default: return -ENOTSUP; } @@ -588,6 +492,9 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) for (i = nb_queues; i < old_nb_queues; i++) (*dev->dev_ops->rx_queue_release)(rxq[i]); + + rte_free(dev->data->rx_queues); + dev->data->rx_queues = NULL; } dev->data->nb_rx_queues = nb_queues; return 0; @@ -739,6 +646,9 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) for (i = nb_queues; i < old_nb_queues; i++) (*dev->dev_ops->tx_queue_release)(txq[i]); + + rte_free(dev->data->tx_queues); + dev->data->tx_queues = NULL; } dev->data->nb_tx_queues = nb_queues; return 0; @@ -839,16 +749,19 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return -EINVAL; } - /* - * If link state interrupt is enabled, check that the - * device supports it. 
- */ + /* Check that the device supports requested interrupts */ if ((dev_conf->intr_conf.lsc == 1) && (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n", dev->data->drv_name); return -EINVAL; } + if ((dev_conf->intr_conf.rmv == 1) && + (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { + RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n", + dev->data->drv_name); + return -EINVAL; + } /* * If jumbo frames are enabled, check that the maximum RX packet @@ -909,39 +822,61 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return 0; } +void +_rte_eth_dev_reset(struct rte_eth_dev *dev) +{ + if (dev->data->dev_started) { + RTE_PMD_DEBUG_TRACE( + "port %d must be stopped to allow reset\n", + dev->data->port_id); + return; + } + + rte_eth_dev_rx_queue_config(dev, 0); + rte_eth_dev_tx_queue_config(dev, 0); + + memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); +} + static void rte_eth_dev_config_restore(uint8_t port_id) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; - struct ether_addr addr; + struct ether_addr *addr; uint16_t i; uint32_t pool = 0; + uint64_t pool_mask; dev = &rte_eth_devices[port_id]; rte_eth_dev_info_get(port_id, &dev_info); - if (RTE_ETH_DEV_SRIOV(dev).active) - pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx; - - /* replay MAC address configuration */ - for (i = 0; i < dev_info.max_mac_addrs; i++) { - addr = dev->data->mac_addrs[i]; - - /* skip zero address */ - if (is_zero_ether_addr(&addr)) - continue; - - /* add address to the hardware */ - if (*dev->dev_ops->mac_addr_add && - (dev->data->mac_pool_sel[i] & (1ULL << pool))) - (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool); - else { - RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n", - port_id); - /* exit the loop but not return an error */ - break; + /* replay MAC address configuration including default MAC */ + addr = &dev->data->mac_addrs[0]; + if (*dev->dev_ops->mac_addr_set != NULL) + (*dev->dev_ops->mac_addr_set)(dev, addr); + else if (*dev->dev_ops->mac_addr_add != NULL) + (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); + + if (*dev->dev_ops->mac_addr_add != NULL) { + for (i = 1; i < dev_info.max_mac_addrs; i++) { + addr = &dev->data->mac_addrs[i]; + + /* skip zero address */ + if (is_zero_ether_addr(addr)) + continue; + + pool = 0; + pool_mask = dev->data->mac_pool_sel[i]; + + do { + if (pool_mask & 1ULL) + (*dev->dev_ops->mac_addr_add)(dev, + addr, i, pool); + pool_mask >>= 1; + pool++; + } while (pool_mask); } } @@ -1051,8 +986,10 @@ rte_eth_dev_close(uint8_t port_id) dev->data->dev_started = 0; (*dev->dev_ops->dev_close)(dev); + dev->data->nb_rx_queues = 0; rte_free(dev->data->rx_queues); dev->data->rx_queues = NULL; + dev->data->nb_tx_queues = 0; rte_free(dev->data->tx_queues); dev->data->tx_queues = NULL; } @@ -1067,6 +1004,7 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, uint32_t mbp_buf_size; struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + void **rxq; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1125,6 +1063,14 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, return -EINVAL; } + rxq = dev->data->rx_queues; + if (rxq[rx_queue_id]) { + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, + -ENOTSUP); + (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); + rxq[rx_queue_id] = NULL; + } + if (rx_conf == NULL) rx_conf = &dev_info.default_rxconf; @@ -1146,6 +1092,7 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, 
{ struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + void **txq; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1178,6 +1125,14 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, return -EINVAL; } + txq = dev->data->tx_queues; + if (txq[tx_queue_id]) { + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, + -ENOTSUP); + (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); + txq[tx_queue_id] = NULL; + } + if (tx_conf == NULL) tx_conf = &dev_info.default_txconf; @@ -1234,6 +1189,20 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) return ret; } +int +rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + + /* Validate Input Data. Bail if not valid or not supported. */ + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); + + /* Call driver to free pending mbufs. */ + return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], + free_cnt); +} + void rte_eth_promiscuous_enable(uint8_t port_id) { @@ -1393,12 +1362,19 @@ get_xstats_count(uint8_t port_id) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); dev = &rte_eth_devices[port_id]; + if (dev->dev_ops->xstats_get_names_by_id != NULL) { + count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, + NULL, 0); + if (count < 0) + return count; + } if (dev->dev_ops->xstats_get_names != NULL) { count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); if (count < 0) return count; } else count = 0; + count += RTE_NB_STATS; count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) * RTE_NB_RXQ_STATS; @@ -1407,10 +1383,171 @@ get_xstats_count(uint8_t port_id) return count; } +int +rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name, + uint64_t *id) +{ + int cnt_xstats, idx_xstat; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (!id) { + RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n"); + return -ENOMEM; + } + + if (!xstat_name) { + RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n"); + return -ENOMEM; + } + + /* Get count */ + cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); + if (cnt_xstats < 0) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n"); + return -ENODEV; + } + + /* Get id-name lookup table */ + struct rte_eth_xstat_name xstats_names[cnt_xstats]; + + if (cnt_xstats != rte_eth_xstats_get_names_by_id( + port_id, xstats_names, cnt_xstats, NULL)) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n"); + return -1; + } + + for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { + if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { + *id = idx_xstat; + return 0; + }; + } + + return -EINVAL; +} + +int +rte_eth_xstats_get_names_by_id(uint8_t port_id, + struct rte_eth_xstat_name *xstats_names, unsigned int size, + uint64_t *ids) +{ + /* Get all xstats */ + if (!ids) { + struct rte_eth_dev *dev; + int cnt_used_entries; + int cnt_expected_entries; + int cnt_driver_entries; + uint32_t idx, id_queue; + uint16_t num_q; + + cnt_expected_entries = get_xstats_count(port_id); + if (xstats_names == NULL || cnt_expected_entries < 0 || + (int)size < cnt_expected_entries) + return cnt_expected_entries; + + /* port_id checked in get_xstats_count() */ + dev = &rte_eth_devices[port_id]; + cnt_used_entries = 0; + + for (idx = 0; idx < RTE_NB_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "%s", 
rte_stats_strings[idx].name); + cnt_used_entries++; + } + num_q = RTE_MIN(dev->data->nb_rx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (id_queue = 0; id_queue < num_q; id_queue++) { + for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "rx_q%u%s", + id_queue, + rte_rxq_stats_strings[idx].name); + cnt_used_entries++; + } + + } + num_q = RTE_MIN(dev->data->nb_tx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (id_queue = 0; id_queue < num_q; id_queue++) { + for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "tx_q%u%s", + id_queue, + rte_txq_stats_strings[idx].name); + cnt_used_entries++; + } + } + + if (dev->dev_ops->xstats_get_names_by_id != NULL) { + /* If there are any driver-specific xstats, append them + * to end of list. + */ + cnt_driver_entries = + (*dev->dev_ops->xstats_get_names_by_id)( + dev, + xstats_names + cnt_used_entries, + NULL, + size - cnt_used_entries); + if (cnt_driver_entries < 0) + return cnt_driver_entries; + cnt_used_entries += cnt_driver_entries; + + } else if (dev->dev_ops->xstats_get_names != NULL) { + /* If there are any driver-specific xstats, append them + * to end of list. + */ + cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( + dev, + xstats_names + cnt_used_entries, + size - cnt_used_entries); + if (cnt_driver_entries < 0) + return cnt_driver_entries; + cnt_used_entries += cnt_driver_entries; + } + + return cnt_used_entries; + } + /* Get only xstats given by IDS */ + else { + uint16_t len, i; + struct rte_eth_xstat_name *xstats_names_copy; + + len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); + + xstats_names_copy = + malloc(sizeof(struct rte_eth_xstat_name) * len); + if (!xstats_names_copy) { + RTE_PMD_DEBUG_TRACE( + "ERROR: can't allocate memory for values_copy\n"); + free(xstats_names_copy); + return -1; + } + + rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy, + len, NULL); + + for (i = 0; i < size; i++) { + if (ids[i] >= len) { + RTE_PMD_DEBUG_TRACE( + "ERROR: id value isn't valid\n"); + return -1; + } + strcpy(xstats_names[i].name, + xstats_names_copy[ids[i]].name); + } + free(xstats_names_copy); + return size; + } +} + int rte_eth_xstats_get_names(uint8_t port_id, struct rte_eth_xstat_name *xstats_names, - unsigned size) + unsigned int size) { struct rte_eth_dev *dev; int cnt_used_entries; @@ -1473,14 +1610,140 @@ rte_eth_xstats_get_names(uint8_t port_id, } /* retrieve ethdev extended statistics */ +int +rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values, + unsigned int n) +{ + /* If need all xstats */ + if (!ids) { + struct rte_eth_stats eth_stats; + struct rte_eth_dev *dev; + unsigned int count = 0, i, q; + signed int xcount = 0; + uint64_t val, *stats_ptr; + uint16_t nb_rxqs, nb_txqs; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + nb_txqs = RTE_MIN(dev->data->nb_tx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + + /* Return generic statistics */ + count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) + + (nb_txqs * RTE_NB_TXQ_STATS); + + + /* implemented by the driver */ + if (dev->dev_ops->xstats_get_by_id != NULL) { + /* Retrieve the xstats from the driver at the end of the + * xstats struct. Retrieve all xstats. + */ + xcount = (*dev->dev_ops->xstats_get_by_id)(dev, + NULL, + values ? values + count : NULL, + (n > count) ? 
n - count : 0); + + if (xcount < 0) + return xcount; + /* implemented by the driver */ + } else if (dev->dev_ops->xstats_get != NULL) { + /* Retrieve the xstats from the driver at the end of the + * xstats struct. Retrieve all xstats. + * Compatibility for PMD without xstats_get_by_ids + */ + unsigned int size = (n > count) ? n - count : 1; + struct rte_eth_xstat xstats[size]; + + xcount = (*dev->dev_ops->xstats_get)(dev, + values ? xstats : NULL, size); + + if (xcount < 0) + return xcount; + + if (values != NULL) + for (i = 0 ; i < (unsigned int)xcount; i++) + values[i + count] = xstats[i].value; + } + + if (n < count + xcount || values == NULL) + return count + xcount; + + /* now fill the xstats structure */ + count = 0; + rte_eth_stats_get(port_id, ð_stats); + + /* global stats */ + for (i = 0; i < RTE_NB_STATS; i++) { + stats_ptr = RTE_PTR_ADD(ð_stats, + rte_stats_strings[i].offset); + val = *stats_ptr; + values[count++] = val; + } + + /* per-rxq stats */ + for (q = 0; q < nb_rxqs; q++) { + for (i = 0; i < RTE_NB_RXQ_STATS; i++) { + stats_ptr = RTE_PTR_ADD(ð_stats, + rte_rxq_stats_strings[i].offset + + q * sizeof(uint64_t)); + val = *stats_ptr; + values[count++] = val; + } + } + + /* per-txq stats */ + for (q = 0; q < nb_txqs; q++) { + for (i = 0; i < RTE_NB_TXQ_STATS; i++) { + stats_ptr = RTE_PTR_ADD(ð_stats, + rte_txq_stats_strings[i].offset + + q * sizeof(uint64_t)); + val = *stats_ptr; + values[count++] = val; + } + } + + return count + xcount; + } + /* Need only xstats given by IDS array */ + else { + uint16_t i, size; + uint64_t *values_copy; + + size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0); + + values_copy = malloc(sizeof(*values_copy) * size); + if (!values_copy) { + RTE_PMD_DEBUG_TRACE( + "ERROR: can't allocate memory for values_copy\n"); + return -1; + } + + rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size); + + for (i = 0; i < n; i++) { + if (ids[i] >= size) { + RTE_PMD_DEBUG_TRACE( + "ERROR: id value isn't valid\n"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + free(values_copy); + return n; + } +} + int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, - unsigned n) + unsigned int n) { struct rte_eth_stats eth_stats; struct rte_eth_dev *dev; - unsigned count = 0, i, q; - signed xcount = 0; + unsigned int count = 0, i, q; + signed int xcount = 0; uint64_t val, *stats_ptr; uint16_t nb_rxqs, nb_txqs; @@ -1606,6 +1869,18 @@ rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id, STAT_QMAP_RX); } +int +rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); + return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size); +} + void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) { @@ -1625,7 +1900,6 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); - dev_info->pci_dev = dev->pci_dev; dev_info->driver_name = dev->data->drv_name; dev_info->nb_rx_queues = dev->data->nb_rx_queues; dev_info->nb_tx_queues = dev->data->nb_tx_queues; @@ -1883,13 +2157,7 @@ rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, if (!reta_conf) return -EINVAL; - if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) { - RTE_PMD_DEBUG_TRACE("Invalid reta size, 
should be %u aligned\n", - RTE_RETA_GROUP_SIZE); - return -EINVAL; - } - - num = reta_size / RTE_RETA_GROUP_SIZE; + num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE; for (i = 0; i < num; i++) { if (reta_conf[i].mask) return 0; @@ -2101,6 +2369,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, struct rte_eth_dev *dev; int index; uint64_t pool_mask; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -2133,15 +2402,17 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, } /* Update NIC */ - (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); + ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); - /* Update address in NIC data structure */ - ether_addr_copy(addr, &dev->data->mac_addrs[index]); + if (ret == 0) { + /* Update address in NIC data structure */ + ether_addr_copy(addr, &dev->data->mac_addrs[index]); - /* Update pool bitmap in NIC data structure */ - dev->data->mac_pool_sel[index] |= (1ULL << pool); + /* Update pool bitmap in NIC data structure */ + dev->data->mac_pool_sel[index] |= (1ULL << pool); + } - return 0; + return ret; } int @@ -2194,32 +2465,6 @@ rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr) return 0; } -int -rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf, - uint16_t rx_mode, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf); - return -EINVAL; - } - - if (rx_mode == 0) { - RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n"); - return -EINVAL; - } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP); - return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on); -} /* * Returns index into MAC address array of addr. 
Use 00:00:00:00:00:00 to find @@ -2309,76 +2554,6 @@ rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on) return (*dev->dev_ops->uc_all_hash_table_set)(dev, on); } -int -rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP); - return (*dev->dev_ops->set_vf_rx)(dev, vf, on); -} - -int -rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on) -{ - uint16_t num_vfs; - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - - num_vfs = dev_info.max_vfs; - if (vf > num_vfs) { - RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP); - return (*dev->dev_ops->set_vf_tx)(dev, vf, on); -} - -int -rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, - uint64_t vf_mask, uint8_t vlan_on) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - if (vlan_id > ETHER_MAX_VLAN_ID) { - RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n", - vlan_id); - return -EINVAL; - } - - if (vf_mask == 0) { - RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n"); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP); - return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id, - vf_mask, vlan_on); -} - int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx, uint16_t tx_rate) { @@ -2409,45 +2584,12 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx, return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate); } -int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate, - uint64_t q_msk) -{ - struct rte_eth_dev *dev; - struct rte_eth_dev_info dev_info; - struct rte_eth_link link; - - if (q_msk == 0) - return 0; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); - link = dev->data->dev_link; - - if (vf > dev_info.max_vfs) { - RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: " - "invalid vf id=%d\n", port_id, vf); - return -EINVAL; - } - - if (tx_rate > link.link_speed) { - RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, " - "bigger than link speed= %d\n", - tx_rate, link.link_speed); - return -EINVAL; - } - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP); - return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk); -} - int rte_eth_mirror_rule_set(uint8_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); if (mirror_conf->rule_type == 0) { @@ -2483,7 +2625,7 @@ rte_eth_mirror_rule_set(uint8_t port_id, int rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct rte_eth_dev *dev; 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); @@ -2590,7 +2732,7 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev, dev_cb = *cb_lst; cb_lst->active = 1; if (cb_arg != NULL) - dev_cb.cb_arg = (void *) cb_arg; + dev_cb.cb_arg = cb_arg; rte_spinlock_unlock(&rte_eth_dev_cb_lock); dev_cb.cb_fn(dev->data->port_id, dev_cb.event, @@ -2613,7 +2755,13 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - intr_handle = &dev->pci_dev->intr_handle; + + if (!dev->intr_handle) { + RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n"); + return -ENOTSUP; + } + + intr_handle = dev->intr_handle; if (!intr_handle->intr_vec) { RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n"); return -EPERM; @@ -2641,7 +2789,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, const struct rte_memzone *mz; snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - dev->driver->pci_drv.driver.name, ring_name, + dev->data->drv_name, ring_name, dev->data->port_id, queue_id); mz = rte_memzone_lookup(z_name); @@ -2673,7 +2821,12 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, return -EINVAL; } - intr_handle = &dev->pci_dev->intr_handle; + if (!dev->intr_handle) { + RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n"); + return -ENOTSUP; + } + + intr_handle = dev->intr_handle; if (!intr_handle->intr_vec) { RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n"); return -EPERM; @@ -3266,26 +3419,6 @@ rte_eth_dev_get_dcb_info(uint8_t port_id, return (*dev->dev_ops->get_dcb_info)(dev, dcb_info); } -void -rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev) -{ - if ((eth_dev == NULL) || (pci_dev == NULL)) { - RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n", - eth_dev, pci_dev); - return; - } - - eth_dev->data->dev_flags = 0; - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) - eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE) - eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; - - eth_dev->data->kdrv = pci_dev->kdrv; - eth_dev->data->numa_node = pci_dev->device.numa_node; - eth_dev->data->drv_name = pci_dev->driver->driver.name; -} - int rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel) diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h index 96781792..0f38b45f 100644 --- a/lib/librte_ether/rte_ethdev.h +++ b/lib/librte_ether/rte_ethdev.h @@ -179,9 +179,9 @@ extern "C" { #include #include -#include #include #include +#include #include "rte_ether.h" #include "rte_eth_ctrl.h" #include "rte_dev_info.h" @@ -564,7 +564,7 @@ struct rte_eth_rss_reta_entry64 { /** * This enum indicates the possible number of traffic classes - * in DCB configratioins + * in DCB configurations */ enum rte_eth_nb_tcs { ETH_4_TCS = 4, /**< 4 TCs with DCB. */ @@ -702,6 +702,29 @@ struct rte_eth_desc_lim { uint16_t nb_max; /**< Max allowed number of descriptors. */ uint16_t nb_min; /**< Min allowed number of descriptors. */ uint16_t nb_align; /**< Number of descriptors should be aligned to. */ + + /** + * Max allowed number of segments per whole packet. + * + * - For TSO packet this is the total number of data descriptors allowed + * by device. + * + * @see nb_mtu_seg_max + */ + uint16_t nb_seg_max; + + /** + * Max number of segments per one MTU. + * + * - For non-TSO packet, this is the maximum allowed number of segments + * in a single transmit packet. 
+ * + * - For TSO packet each segment within the TSO may span up to this + * value. + * + * @see nb_seg_max + */ + uint16_t nb_mtu_seg_max; }; /** @@ -792,9 +815,11 @@ struct rte_eth_udp_tunnel { */ struct rte_intr_conf { /** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */ - uint16_t lsc; + uint32_t lsc:1; /** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */ - uint16_t rxq; + uint32_t rxq:1; + /** enable/disable rmv interrupt. 0 (default) - disable, 1 enable */ + uint32_t rmv:1; }; /** @@ -857,6 +882,7 @@ struct rte_eth_conf { #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040 +#define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080 /** * TX offload capabilities of a device. @@ -874,6 +900,9 @@ struct rte_eth_conf { #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400 /**< Used for tunneling packet. */ #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800 /**< Used for tunneling packet. */ #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000 /**< Used for tunneling packet. */ +#define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000 + +struct rte_pci_device; /** * Ethernet device information @@ -938,23 +967,26 @@ struct rte_eth_txq_info { /** * An Ethernet device extended statistic structure * - * This structure is used by ethdev->eth_xstats_get() to provide - * statistics that are not provided in the generic rte_eth_stats + * This structure is used by rte_eth_xstats_get() to provide + * statistics that are not provided in the generic *rte_eth_stats* * structure. + * It maps a name id, corresponding to an index in the array returned + * by rte_eth_xstats_get_names(), to a statistic value. */ struct rte_eth_xstat { - uint64_t id; - uint64_t value; + uint64_t id; /**< The index in xstats name array. */ + uint64_t value; /**< The statistic counter value. */ }; /** - * A name-key lookup element for extended statistics. + * A name element for extended statistics. * - * This structure is used to map between names and ID numbers - * for extended ethernet statistics. + * An array of this structure is returned by rte_eth_xstats_get_names(). + * It lists the names of extended statistics for a PMD. The *rte_eth_xstat* + * structure references these names by their array index. */ struct rte_eth_xstat_name { - char name[RTE_ETH_XSTATS_NAME_SIZE]; + char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */ }; #define ETH_DCB_NUM_TCS 8 @@ -1001,15 +1033,6 @@ struct rte_eth_dev_callback; /** @internal Structure to keep track of registered callbacks */ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback); - -#ifdef RTE_LIBRTE_ETHDEV_DEBUG -#define RTE_PMD_DEBUG_TRACE(...) \ - rte_pmd_debug_trace(__func__, __VA_ARGS__) -#else -#define RTE_PMD_DEBUG_TRACE(...) -#endif - - /* Macros to check for valid port */ #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \ if (!rte_eth_dev_is_valid_port(port_id)) { \ @@ -1089,6 +1112,12 @@ typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, unsigned n); /**< @internal Get extended stats of an Ethernet device. */ +typedef int (*eth_xstats_get_by_id_t)(struct rte_eth_dev *dev, + const uint64_t *ids, + uint64_t *values, + unsigned int n); +/**< @internal Get extended stats of an Ethernet device. */ + typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev); /**< @internal Reset extended stats of an Ethernet device. 
*/ @@ -1096,6 +1125,11 @@ typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned size); /**< @internal Get names of extended stats of an Ethernet device. */ +typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int size); +/**< @internal Get names of extended stats of an Ethernet device. */ + typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev, uint16_t queue_id, uint8_t stat_idx, @@ -1145,11 +1179,24 @@ typedef void (*eth_queue_release_t)(void *queue); typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev, uint16_t rx_queue_id); -/**< @internal Get number of available descriptors on a receive queue of an Ethernet device. */ +/**< @internal Get number of used descriptors on a receive queue. */ typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset); /**< @internal Check DD bit of specific RX descriptor */ +typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset); +/**< @internal Check the status of a Rx descriptor */ + +typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset); +/**< @internal Check the status of a Tx descriptor */ + +typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev, + char *fw_version, size_t fw_size); +/**< @internal Get firmware information of an Ethernet device. */ + +typedef int (*eth_tx_done_cleanup_t)(void *txq, uint32_t free_cnt); +/**< @internal Force mbufs to be from TX ring. */ + typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev, uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo); @@ -1191,6 +1238,11 @@ typedef uint16_t (*eth_tx_burst_t)(void *txq, uint16_t nb_pkts); /**< @internal Send output packets on a transmit queue of an Ethernet device. */ +typedef uint16_t (*eth_tx_prep_t)(void *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +/**< @internal Prepare output packets on a transmit queue of an Ethernet device. 
*/ + typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); /**< @internal Get current flow control parameter on an Ethernet device */ @@ -1230,7 +1282,7 @@ typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev); typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index); /**< @internal Remove MAC address from receive address register */ -typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev, +typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t vmdq); @@ -1249,39 +1301,11 @@ typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev, uint8_t on); /**< @internal Set all Unicast Hash bitmap */ -typedef int (*eth_set_vf_rx_mode_t)(struct rte_eth_dev *dev, - uint16_t vf, - uint16_t rx_mode, - uint8_t on); -/**< @internal Set a VF receive mode */ - -typedef int (*eth_set_vf_rx_t)(struct rte_eth_dev *dev, - uint16_t vf, - uint8_t on); -/**< @internal Set a VF receive mode */ - -typedef int (*eth_set_vf_tx_t)(struct rte_eth_dev *dev, - uint16_t vf, - uint8_t on); -/**< @internal Enable or disable a VF transmit */ - -typedef int (*eth_set_vf_vlan_filter_t)(struct rte_eth_dev *dev, - uint16_t vlan, - uint64_t vf_mask, - uint8_t vlan_on); -/**< @internal Set VF VLAN pool filter */ - typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); /**< @internal Set queue TX rate */ -typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev, - uint16_t vf, - uint16_t tx_rate, - uint64_t q_msk); -/**< @internal Set VF TX rate */ - typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, @@ -1431,11 +1455,18 @@ struct eth_dev_ops { eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */ eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */ eth_dev_close_t dev_close; /**< Close device. */ + eth_link_update_t link_update; /**< Get device link state. */ + eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */ eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */ eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. */ eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OF. */ - eth_link_update_t link_update; /**< Get device link state. */ + eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */ + eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */ + eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address. */ + eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs. */ + mtu_set_t mtu_set; /**< Set MTU. */ + eth_stats_get_t stats_get; /**< Get generic device statistics. */ eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */ eth_xstats_get_t xstats_get; /**< Get extended device statistics. */ @@ -1444,109 +1475,104 @@ struct eth_dev_ops { /**< Get names of extended statistics. */ eth_queue_stats_mapping_set_t queue_stats_mapping_set; /**< Configure per queue stat counter mapping. */ + eth_dev_infos_get_t dev_infos_get; /**< Get device info. */ + eth_rxq_info_get_t rxq_info_get; /**< retrieve RX queue information. */ + eth_txq_info_get_t txq_info_get; /**< retrieve TX queue information. */ + eth_fw_version_get_t fw_version_get; /**< Get firmware version. */ eth_dev_supported_ptypes_get_t dev_supported_ptypes_get; - /**< Get packet types supported and identified by device*/ - mtu_set_t mtu_set; /**< Set MTU. 
*/ - vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */ - vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */ + /**< Get packet types supported and identified by device. */ + + vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */ + vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */ vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */ vlan_offload_set_t vlan_offload_set; /**< Set VLAN Offload. */ - vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion */ - eth_queue_start_t rx_queue_start;/**< Start RX for a queue.*/ - eth_queue_stop_t rx_queue_stop;/**< Stop RX for a queue.*/ - eth_queue_start_t tx_queue_start;/**< Start TX for a queue.*/ - eth_queue_stop_t tx_queue_stop;/**< Stop TX for a queue.*/ - eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue.*/ - eth_queue_release_t rx_queue_release;/**< Release RX queue.*/ - eth_rx_queue_count_t rx_queue_count; /**< Get Rx queue count. */ - eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit */ - /**< Enable Rx queue interrupt. */ - eth_rx_enable_intr_t rx_queue_intr_enable; - /**< Disable Rx queue interrupt.*/ - eth_rx_disable_intr_t rx_queue_intr_disable; - eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/ - eth_queue_release_t tx_queue_release;/**< Release TX queue.*/ + vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion. */ + + eth_queue_start_t rx_queue_start;/**< Start RX for a queue. */ + eth_queue_stop_t rx_queue_stop; /**< Stop RX for a queue. */ + eth_queue_start_t tx_queue_start;/**< Start TX for a queue. */ + eth_queue_stop_t tx_queue_stop; /**< Stop TX for a queue. */ + eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue. */ + eth_queue_release_t rx_queue_release; /**< Release RX queue. */ + eth_rx_queue_count_t rx_queue_count; + /**< Get the number of used RX descriptors. */ + eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */ + eth_rx_descriptor_status_t rx_descriptor_status; + /**< Check the status of a Rx descriptor. */ + eth_tx_descriptor_status_t tx_descriptor_status; + /**< Check the status of a Tx descriptor. */ + eth_rx_enable_intr_t rx_queue_intr_enable; /**< Enable Rx queue interrupt. */ + eth_rx_disable_intr_t rx_queue_intr_disable; /**< Disable Rx queue interrupt. */ + eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue. */ + eth_queue_release_t tx_queue_release; /**< Release TX queue. */ + eth_tx_done_cleanup_t tx_done_cleanup;/**< Free tx ring mbufs */ + eth_dev_led_on_t dev_led_on; /**< Turn on LED. */ eth_dev_led_off_t dev_led_off; /**< Turn off LED. */ + flow_ctrl_get_t flow_ctrl_get; /**< Get flow control. */ flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. 
*/ - priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control.*/ - eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address */ - eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address */ - eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address */ - eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array */ - eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap */ - eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule.*/ - eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule.*/ - eth_set_vf_rx_mode_t set_vf_rx_mode; /**< Set VF RX mode */ - eth_set_vf_rx_t set_vf_rx; /**< enable/disable a VF receive */ - eth_set_vf_tx_t set_vf_tx; /**< enable/disable a VF transmit */ - eth_set_vf_vlan_filter_t set_vf_vlan_filter; /**< Set VF VLAN filter */ - /** Add UDP tunnel port. */ - eth_udp_tunnel_port_add_t udp_tunnel_port_add; - /** Del UDP tunnel port. */ - eth_udp_tunnel_port_del_t udp_tunnel_port_del; - eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit */ - eth_set_vf_rate_limit_t set_vf_rate_limit; /**< Set VF rate limit */ - /** Update redirection table. */ - reta_update_t reta_update; - /** Query redirection table. */ - reta_query_t reta_query; - - eth_get_reg_t get_reg; - /**< Get registers */ - eth_get_eeprom_length_t get_eeprom_length; - /**< Get eeprom length */ - eth_get_eeprom_t get_eeprom; - /**< Get eeprom data */ - eth_set_eeprom_t set_eeprom; - /**< Set eeprom */ - /* bypass control */ + priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control. */ + + eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array. */ + eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap. */ + + eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule. */ + eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule. */ + + eth_udp_tunnel_port_add_t udp_tunnel_port_add; /** Add UDP tunnel port. */ + eth_udp_tunnel_port_del_t udp_tunnel_port_del; /** Del UDP tunnel port. */ + eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf; + /** Config ether type of l2 tunnel. */ + eth_l2_tunnel_offload_set_t l2_tunnel_offload_set; + /** Enable/disable l2 tunnel offload functions. */ + + eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit. */ + + rss_hash_update_t rss_hash_update; /** Configure RSS hash protocols. */ + rss_hash_conf_get_t rss_hash_conf_get; /** Get current RSS hash configuration. */ + reta_update_t reta_update; /** Update redirection table. */ + reta_query_t reta_query; /** Query redirection table. */ + + eth_get_reg_t get_reg; /**< Get registers. */ + eth_get_eeprom_length_t get_eeprom_length; /**< Get eeprom length. */ + eth_get_eeprom_t get_eeprom; /**< Get eeprom data. */ + eth_set_eeprom_t set_eeprom; /**< Set eeprom. 
*/ + + /* bypass control */ #ifdef RTE_NIC_BYPASS - bypass_init_t bypass_init; - bypass_state_set_t bypass_state_set; - bypass_state_show_t bypass_state_show; - bypass_event_set_t bypass_event_set; - bypass_event_show_t bypass_event_show; - bypass_wd_timeout_set_t bypass_wd_timeout_set; - bypass_wd_timeout_show_t bypass_wd_timeout_show; - bypass_ver_show_t bypass_ver_show; - bypass_wd_reset_t bypass_wd_reset; + bypass_init_t bypass_init; + bypass_state_set_t bypass_state_set; + bypass_state_show_t bypass_state_show; + bypass_event_set_t bypass_event_set; + bypass_event_show_t bypass_event_show; + bypass_wd_timeout_set_t bypass_wd_timeout_set; + bypass_wd_timeout_show_t bypass_wd_timeout_show; + bypass_ver_show_t bypass_ver_show; + bypass_wd_reset_t bypass_wd_reset; #endif - /** Configure RSS hash protocols. */ - rss_hash_update_t rss_hash_update; - /** Get current RSS hash configuration. */ - rss_hash_conf_get_t rss_hash_conf_get; - eth_filter_ctrl_t filter_ctrl; - /**< common filter control. */ - eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs */ - eth_rxq_info_get_t rxq_info_get; - /**< retrieve RX queue information. */ - eth_txq_info_get_t txq_info_get; - /**< retrieve TX queue information. */ + eth_filter_ctrl_t filter_ctrl; /**< common filter control. */ + + eth_get_dcb_info get_dcb_info; /** Get DCB information. */ + + eth_timesync_enable_t timesync_enable; /** Turn IEEE1588/802.1AS timestamping on. */ - eth_timesync_enable_t timesync_enable; + eth_timesync_disable_t timesync_disable; /** Turn IEEE1588/802.1AS timestamping off. */ - eth_timesync_disable_t timesync_disable; - /** Read the IEEE1588/802.1AS RX timestamp. */ eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp; - /** Read the IEEE1588/802.1AS TX timestamp. */ + /** Read the IEEE1588/802.1AS RX timestamp. */ eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp; - - /** Get DCB information */ - eth_get_dcb_info get_dcb_info; - /** Adjust the device clock.*/ - eth_timesync_adjust_time timesync_adjust_time; - /** Get the device clock time. */ - eth_timesync_read_time timesync_read_time; - /** Set the device clock time. */ - eth_timesync_write_time timesync_write_time; - /** Config ether type of l2 tunnel */ - eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf; - /** Enable/disable l2 tunnel offload functions */ - eth_l2_tunnel_offload_set_t l2_tunnel_offload_set; + /** Read the IEEE1588/802.1AS TX timestamp. */ + eth_timesync_adjust_time timesync_adjust_time; /** Adjust the device clock. */ + eth_timesync_read_time timesync_read_time; /** Get the device clock time. */ + eth_timesync_write_time timesync_write_time; /** Set the device clock time. */ + + eth_xstats_get_by_id_t xstats_get_by_id; + /**< Get extended device statistic values by ID. */ + eth_xstats_get_names_by_id_t xstats_get_names_by_id; + /**< Get name of extended device statistics by ID. */ }; /** @@ -1612,6 +1638,14 @@ struct rte_eth_rxtx_callback { void *param; }; +/** + * A set of values to describe the possible states of an eth device. + */ +enum rte_eth_dev_state { + RTE_ETH_DEV_UNUSED = 0, + RTE_ETH_DEV_ATTACHED, +}; + /** * @internal * The generic data structure associated with each ethernet device. @@ -1625,10 +1659,11 @@ struct rte_eth_rxtx_callback { struct rte_eth_dev { eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */ eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */ + eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. 
*/ struct rte_eth_dev_data *data; /**< Pointer to device data */ - const struct eth_driver *driver;/**< Driver for this device */ const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */ - struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */ + struct rte_device *device; /**< Backing device */ + struct rte_intr_handle *intr_handle; /**< Device interrupt handle */ /** User application callbacks for NIC interrupts */ struct rte_eth_dev_cb_list link_intr_cbs; /** @@ -1641,7 +1676,7 @@ struct rte_eth_dev { * received packets before passing them to the driver for transmission. */ struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT]; - uint8_t attached; /**< Flag indicating the port is attached */ + enum rte_eth_dev_state state; /**< Flag indicating the port state */ } __rte_cache_aligned; struct rte_eth_dev_sriov { @@ -1711,6 +1746,8 @@ struct rte_eth_dev_data { #define RTE_ETH_DEV_INTR_LSC 0x0002 /** Device is a bonded slave */ #define RTE_ETH_DEV_BONDED_SLAVE 0x0004 +/** Device supports device removal interrupt */ +#define RTE_ETH_DEV_INTR_RMV 0x0008 /** * @internal @@ -1719,6 +1756,25 @@ struct rte_eth_dev_data { */ extern struct rte_eth_dev rte_eth_devices[]; +/** + * Iterates over valid ethdev ports. + * + * @param port_id + * The id of the next possible valid port. + * @return + * Next valid port id, RTE_MAX_ETHPORTS if there is none. + */ +uint8_t rte_eth_find_next(uint8_t port_id); + +/** + * Macro to iterate over all enabled ethdev ports. + */ +#define RTE_ETH_FOREACH_DEV(p) \ + for (p = rte_eth_find_next(0); \ + (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \ + p = rte_eth_find_next(p + 1)) + + /** * Get the total number of Ethernet devices that have been successfully * initialized by the [matching] Ethernet driver during the PCI probing phase. @@ -1727,7 +1783,7 @@ extern struct rte_eth_dev rte_eth_devices[]; * immediately after invoking rte_eal_init(). * If the application unplugs a port using hotplug function, The enabled port * numbers may be noncontiguous. In the case, the applications need to manage - * enabled port by themselves. + * enabled port by using the ``RTE_ETH_FOREACH_DEV()`` macro. * * @return * - The total number of usable Ethernet devices. @@ -1757,6 +1813,19 @@ struct rte_eth_dev *rte_eth_dev_allocated(const char *name); */ struct rte_eth_dev *rte_eth_dev_allocate(const char *name); +/** + * @internal + * Attach to the ethdev already initialized by the primary + * process. + * + * @param name Ethernet device's name. + * @return + * - Success: Slot in the rte_dev_devices array for attached + * device. + * - Error: Null pointer. + */ +struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name); + /** * @internal * Release the specified ethdev port. @@ -1769,7 +1838,7 @@ struct rte_eth_dev *rte_eth_dev_allocate(const char *name); int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev); /** - * Attach a new Ethernet device specified by aruguments. + * Attach a new Ethernet device specified by arguments. * * @param devargs * A pointer to a strings array describing the new device @@ -1796,78 +1865,6 @@ int rte_eth_dev_attach(const char *devargs, uint8_t *port_id); */ int rte_eth_dev_detach(uint8_t port_id, char *devname); -struct eth_driver; -/** - * @internal - * Initialization function of an Ethernet driver invoked for each matching - * Ethernet PCI device detected during the PCI probing phase. 
- * - * @param eth_dev - * The *eth_dev* pointer is the address of the *rte_eth_dev* structure - * associated with the matching device and which have been [automatically] - * allocated in the *rte_eth_devices* array. - * The *eth_dev* structure is supplied to the driver initialization function - * with the following fields already initialized: - * - * - *pci_dev*: Holds the pointers to the *rte_pci_device* structure which - * contains the generic PCI information of the matching device. - * - * - *driver*: Holds the pointer to the *eth_driver* structure. - * - * - *dev_private*: Holds a pointer to the device private data structure. - * - * - *mtu*: Contains the default Ethernet maximum frame length (1500). - * - * - *port_id*: Contains the port index of the device (actually the index - * of the *eth_dev* structure in the *rte_eth_devices* array). - * - * @return - * - 0: Success, the device is properly initialized by the driver. - * In particular, the driver MUST have set up the *dev_ops* pointer - * of the *eth_dev* structure. - * - <0: Error code of the device initialization failure. - */ -typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev); - -/** - * @internal - * Finalization function of an Ethernet driver invoked for each matching - * Ethernet PCI device detected during the PCI closing phase. - * - * @param eth_dev - * The *eth_dev* pointer is the address of the *rte_eth_dev* structure - * associated with the matching device and which have been [automatically] - * allocated in the *rte_eth_devices* array. - * @return - * - 0: Success, the device is properly finalized by the driver. - * In particular, the driver MUST free the *dev_ops* pointer - * of the *eth_dev* structure. - * - <0: Error code of the device initialization failure. - */ -typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev); - -/** - * @internal - * The structure associated with a PMD Ethernet driver. - * - * Each Ethernet driver acts as a PCI driver and is represented by a generic - * *eth_driver* structure that holds: - * - * - An *rte_pci_driver* structure (which must be the first field). - * - * - The *eth_dev_init* function invoked for each matching PCI device. - * - * - The *eth_dev_uninit* function invoked for each matching PCI device. - * - * - The size of the private data to allocate for each matching device. - */ -struct eth_driver { - struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */ - eth_dev_init_t eth_dev_init; /**< Device init function. */ - eth_dev_uninit_t eth_dev_uninit; /**< Device uninit function. */ - unsigned int dev_private_size; /**< Size of device private data. */ -}; - /** * Convert a numerical speed in Mbps to a bitmap flag that can be used in * the bitmap link_speeds of the struct rte_eth_conf @@ -1913,6 +1910,19 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex); int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf); +/** + * @internal + * Release device queues and clear its configuration to force the user + * application to reconfigure it. It is for internal use only. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * void + */ +void _rte_eth_dev_reset(struct rte_eth_dev *dev); + /** * Allocate and set up a receive queue for an Ethernet device. * @@ -2272,22 +2282,23 @@ void rte_eth_stats_reset(uint8_t port_id); * @param port_id * The port identifier of the Ethernet device. * @param xstats_names - * Block of memory to insert names into. 
Must be at least size in capacity. - * If set to NULL, function returns required capacity. + * An rte_eth_xstat_name array of at least *size* elements to + * be filled. If set to NULL, the function returns the required number + * of elements. * @param size - * Capacity of xstats_names (number of names). + * The size of the xstats_names array (number of elements). * @return - * - positive value lower or equal to size: success. The return value + * - A positive value lower or equal to size: success. The return value * is the number of entries filled in the stats table. - * - positive value higher than size: error, the given statistics table + * - A positive value higher than size: error, the given statistics table * is too small. The return value corresponds to the size that should * be given to succeed. The entries in the table are not valid and * shall not be used by the caller. - * - negative value on error (invalid port id) + * - A negative value on error (invalid port id). */ int rte_eth_xstats_get_names(uint8_t port_id, struct rte_eth_xstat_name *xstats_names, - unsigned size); + unsigned int size); /** * Retrieve extended statistics of an Ethernet device. @@ -2296,22 +2307,96 @@ int rte_eth_xstats_get_names(uint8_t port_id, * The port identifier of the Ethernet device. * @param xstats * A pointer to a table of structure of type *rte_eth_xstat* - * to be filled with device statistics ids and values. + * to be filled with device statistics ids and values: id is the + * index of the name string in xstats_names (see rte_eth_xstats_get_names()), + * and value is the statistic counter. * This parameter can be set to NULL if n is 0. * @param n - * The size of the stats table, which should be large enough to store - * all the statistics of the device. + * The size of the xstats array (number of elements). * @return - * - positive value lower or equal to n: success. The return value + * - A positive value lower or equal to n: success. The return value * is the number of entries filled in the stats table. - * - positive value higher than n: error, the given statistics table + * - A positive value higher than n: error, the given statistics table * is too small. The return value corresponds to the size that should * be given to succeed. The entries in the table are not valid and * shall not be used by the caller. - * - negative value on error (invalid port id) + * - A negative value on error (invalid port id). */ int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, - unsigned n); + unsigned int n); + +/** + * Retrieve names of extended statistics of an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param xstats_names + * An rte_eth_xstat_name array of at least *size* elements to + * be filled. If set to NULL, the function returns the required number + * of elements. + * @param ids + * IDs array given by app to retrieve specific statistics + * @param size + * The size of the xstats_names array (number of elements). + * @return + * - A positive value lower or equal to size: success. The return value + * is the number of entries filled in the stats table. + * - A positive value higher than size: error, the given statistics table + * is too small. The return value corresponds to the size that should + * be given to succeed. The entries in the table are not valid and + * shall not be used by the caller. + * - A negative value on error (invalid port id). 
+ */
+int
+rte_eth_xstats_get_names_by_id(uint8_t port_id,
+	struct rte_eth_xstat_name *xstats_names, unsigned int size,
+	uint64_t *ids);
+
+/**
+ * Retrieve extended statistics of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param ids
+ *   A pointer to an ids array passed by the application. This tells which
+ *   statistics values the function should retrieve. This parameter
+ *   can be set to NULL if n is 0. In this case the function will retrieve
+ *   all available statistics.
+ * @param values
+ *   A pointer to a table to be filled with device statistics values.
+ * @param n
+ *   The size of the ids array (number of elements).
+ * @return
+ *   - A positive value lower or equal to n: success. The return value
+ *     is the number of entries filled in the stats table.
+ *   - A positive value higher than n: error, the given statistics table
+ *     is too small. The return value corresponds to the size that should
+ *     be given to succeed. The entries in the table are not valid and
+ *     shall not be used by the caller.
+ *   - A negative value on error (invalid port id).
+ */
+int rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids,
+			     uint64_t *values, unsigned int n);
+
+/**
+ * Gets the ID of a statistic from its name.
+ *
+ * This function searches for the statistics using string compares, and
+ * as such should not be used on the fast-path. For fast-path retrieval of
+ * specific statistics, store the ID as provided in *id* from this function,
+ * and pass that ID to rte_eth_xstats_get_by_id().
+ *
+ * @param port_id The port to look up statistics from
+ * @param xstat_name The name of the statistic to return
+ * @param[out] id A pointer to an app-supplied uint64_t which should be
+ *                set to the ID of the stat if the stat exists.
+ * @return
+ *    0 on success
+ *    -ENODEV for invalid port_id,
+ *    -EINVAL if the xstat_name doesn't exist in port_id
+ */
+int rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
+		uint64_t *id);

 /**
  * Reset extended statistics of an Ethernet device.
@@ -2384,6 +2469,27 @@ void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
  */
 void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);

+/**
+ * Retrieve the firmware version of a device.
+ *
+ * @param port_id
+ *   The port identifier of the device.
+ * @param fw_version
+ *   A pointer to a string array storing the firmware version of a device;
+ *   the string includes the terminating null. This pointer is allocated by
+ *   the caller.
+ * @param fw_size
+ *   The size of the string array pointed to by fw_version, which should be
+ *   large enough to store the firmware version of the device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if operation is not supported.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (>0) if *fw_size* is not large enough to store the firmware version;
+ *     the return value is the size of the non-truncated string.
+ */
+int rte_eth_dev_fw_version_get(uint8_t port_id,
+			       char *fw_version, size_t fw_size);
+
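As a usage sketch for the statistics-by-ID accessors introduced above: resolve a counter's ID once with the string-comparing lookup, off the fast path, then poll its value by ID. The counter name "rx_good_packets" is a common xstat name but remains driver-specific and is used here only as an assumed example:

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static int
    poll_one_xstat(uint8_t port_id)
    {
            uint64_t id;
            uint64_t value;

            /* Slow path: one-time name-to-ID resolution. */
            if (rte_eth_xstats_get_id_by_name(port_id,
                            "rx_good_packets", &id) != 0)
                    return -1;

            /* Fast path: fetch a single counter by its ID. */
            if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) != 1)
                    return -1;

            printf("rx_good_packets: %" PRIu64 "\n", value);
            return 0;
    }
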
 /**
  * Retrieve the supported packet types of an Ethernet device.
  *
@@ -2413,7 +2519,7 @@ void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
  * @param ptype_mask
  *   A hint of what kind of packet type which the caller is interested in.
  * @param ptypes
- *   An array pointer to store adequent packet types, allocated by caller.
+ *   An array pointer to store adequate packet types, allocated by caller.
  * @param num
  *   Size of the array pointed by param ptypes.
  * @return
@@ -2553,12 +2659,12 @@ int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
 int rte_eth_dev_get_vlan_offload(uint8_t port_id);

 /**
- * Set port based TX VLAN insersion on or off.
+ * Set port based TX VLAN insertion on or off.
  *
  * @param port_id
  *   The port identifier of the Ethernet device.
  * @param pvid
- *   Port based TX VLAN identifier togeth with user priority.
+ *   Port based TX VLAN identifier together with user priority.
  * @param on
  *   Turn on or off the port based TX VLAN insertion.
  *
@@ -2615,7 +2721,7 @@ int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
  * method to retrieve bursts of received packets and to immediately
  * queue them for further parallel processing by another logical core,
  * for instance. However, instead of having received packets being
- * individually queued by the driver, this approach allows the invoker
+ * individually queued by the driver, this approach allows the caller
  * of the rte_eth_rx_burst() function to queue a burst of retrieved
  * packets at a time and therefore dramatically reduce the cost of
  * enqueue/dequeue operations per packet.
@@ -2684,7 +2790,7 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
 }

 /**
- * Get the number of used descriptors in a specific queue
+ * Get the number of used descriptors of an Rx queue
  *
  * @param port_id
  *   The port identifier of the Ethernet device.
@@ -2692,16 +2798,21 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
  *  The queue id on the specific port.
  * @return
  *  The number of used descriptors in the specific queue, or:
- *     (-EINVAL) if *port_id* is invalid
+ *     (-EINVAL) if *port_id* or *queue_id* is invalid
  *     (-ENOTSUP) if the device does not support this function
  */
 static inline int
 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	struct rte_eth_dev *dev;
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+	dev = &rte_eth_devices[port_id];
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
-	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+	if (queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
 }

 /**
@@ -2729,6 +2840,121 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
 		dev->data->rx_queues[queue_id], offset);
 }

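As a sketch of how the strengthened rte_eth_rx_queue_count() above might be used on a configured port; the three-quarters watermark is an arbitrary example value:

    #include <rte_ethdev.h>

    /* Nonzero when the Rx ring is more than three quarters full;
     * nb_rxd is the ring size supplied at queue setup time. */
    static int
    rx_queue_pressured(uint8_t port_id, uint16_t queue_id, uint16_t nb_rxd)
    {
            int used = rte_eth_rx_queue_count(port_id, queue_id);

            return used >= 0 && used > (nb_rxd * 3) / 4;
    }
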
+#define RTE_ETH_RX_DESC_AVAIL    0 /**< Desc available for hw. */
+#define RTE_ETH_RX_DESC_DONE     1 /**< Desc done, filled by hw. */
+#define RTE_ETH_RX_DESC_UNAVAIL  2 /**< Desc used by driver or hw. */
+
+/**
+ * Check the status of a Rx descriptor in the queue.
+ *
+ * It should be called in a similar context as the Rx function:
+ * - on a dataplane core
+ * - not concurrently on the same queue
+ *
+ * Since it's a dataplane function, no check is performed on port_id and
+ * queue_id. The caller must therefore ensure that the port is enabled
+ * and the queue is configured and running.
+ *
+ * Note: accessing a random descriptor in the ring may trigger cache
+ * misses and have a performance impact.
+ *
+ * @param port_id
+ *  A valid port identifier of the Ethernet device.
+ * @param queue_id
+ *  A valid Rx queue identifier on this port.
+ * @param offset
+ *  The offset of the descriptor starting from tail (0 is the next
+ *  packet to be received by the driver).
+ *
+ * @return
+ *  - (RTE_ETH_RX_DESC_AVAIL): Descriptor is available for the hardware to
+ *    receive a packet.
+ *  - (RTE_ETH_RX_DESC_DONE): Descriptor is done, it is filled by hw, but
+ *    not yet processed by the driver (i.e. in the receive queue).
+ *  - (RTE_ETH_RX_DESC_UNAVAIL): Descriptor is unavailable, either held by
+ *    the driver and not yet returned to hw, or reserved by the hw.
+ *  - (-EINVAL) bad descriptor offset.
+ *  - (-ENOTSUP) if the device does not support this function.
+ *  - (-ENODEV) bad port or queue (only if compiled with debug).
+ */
+static inline int
+rte_eth_rx_descriptor_status(uint8_t port_id, uint16_t queue_id,
+	uint16_t offset)
+{
+	struct rte_eth_dev *dev;
+	void *rxq;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+	dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	if (queue_id >= dev->data->nb_rx_queues)
+		return -ENODEV;
+#endif
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
+	rxq = dev->data->rx_queues[queue_id];
+
+	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
+}
+
+#define RTE_ETH_TX_DESC_FULL    0 /**< Desc filled for hw, waiting xmit. */
+#define RTE_ETH_TX_DESC_DONE    1 /**< Desc done, packet is transmitted. */
+#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
+
+/**
+ * Check the status of a Tx descriptor in the queue.
+ *
+ * It should be called in a similar context as the Tx function:
+ * - on a dataplane core
+ * - not concurrently on the same queue
+ *
+ * Since it's a dataplane function, no check is performed on port_id and
+ * queue_id. The caller must therefore ensure that the port is enabled
+ * and the queue is configured and running.
+ *
+ * Note: accessing a random descriptor in the ring may trigger cache
+ * misses and have a performance impact.
+ *
+ * @param port_id
+ *  A valid port identifier of the Ethernet device.
+ * @param queue_id
+ *  A valid Tx queue identifier on this port.
+ * @param offset
+ *  The offset of the descriptor starting from tail (0 is the place where
+ *  the next packet will be sent).
+ *
+ * @return
+ *  - (RTE_ETH_TX_DESC_FULL) Descriptor is being processed by the hw, i.e.
+ *    in the transmit queue.
+ *  - (RTE_ETH_TX_DESC_DONE) Hardware is done with this descriptor, it can
+ *    be reused by the driver.
+ *  - (RTE_ETH_TX_DESC_UNAVAIL): Descriptor is unavailable, reserved by the
+ *    driver or the hardware.
+ *  - (-EINVAL) bad descriptor offset.
+ *  - (-ENOTSUP) if the device does not support this function.
+ *  - (-ENODEV) bad port or queue (only if compiled with debug).
+ */
+static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
+	uint16_t queue_id, uint16_t offset)
+{
+	struct rte_eth_dev *dev;
+	void *txq;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+	dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	if (queue_id >= dev->data->nb_tx_queues)
+		return -ENODEV;
+#endif
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
+	txq = dev->data->tx_queues[queue_id];
+
+	return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
+}
+
 /**
  * Send a burst of output packets on a transmit queue of an Ethernet device.
  *
@@ -2819,6 +3045,115 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
 	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
 }

+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Process a burst of output packets on a transmit queue of an Ethernet device.
+ *
+ * The rte_eth_tx_prepare() function is invoked to prepare output packets to be
+ * transmitted on the output queue *queue_id* of the Ethernet device designated
+ * by its *port_id*.
+ * The *nb_pkts* parameter is the number of packets to be prepared which are
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
+ * allocated from a pool created with rte_pktmbuf_pool_create().
+ * For each packet to send, the rte_eth_tx_prepare() function performs
+ * the following operations:
+ *
+ * - Check if the packet meets the device's requirements for Tx offloads.
+ *
+ * - Check limitations on the number of segments.
+ *
+ * - Check additional requirements when debug is enabled.
+ *
+ * - Update and/or reset required checksums when a Tx offload is set for the
+ *   packet.
+ *
+ * Since this function can modify packet data, provided mbufs must be safely
+ * writable (e.g. modified data cannot be in a shared segment).
+ *
+ * The rte_eth_tx_prepare() function returns the number of packets ready to be
+ * sent. A return value equal to *nb_pkts* means that all packets are valid and
+ * ready to be sent, otherwise it stops processing at the first invalid packet
+ * and leaves the remaining packets untouched.
+ *
+ * When this functionality is not implemented in the driver, all packets
+ * are returned untouched.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ *   The value must be a valid port id.
+ * @param queue_id
+ *   The index of the transmit queue through which output packets must be
+ *   sent.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param tx_pkts
+ *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ *   which contain the output packets.
+ * @param nb_pkts
+ *   The maximum number of packets to process.
+ * @return
+ *   The number of packets correct and ready to be sent. The return value can
+ *   be less than the value of the *nb_pkts* parameter when some packet does
+ *   not meet the device's requirements, with rte_errno set appropriately:
+ *   - -EINVAL: offload flags are not correctly set
+ *   - -ENOTSUP: the offload feature is not supported by the hardware
+ *
+ */
+
+#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
+
+static inline uint16_t
+rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
+		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct rte_eth_dev *dev;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	if (!rte_eth_dev_is_valid_port(port_id)) {
+		RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	if (queue_id >= dev->data->nb_tx_queues) {
+		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	if (!dev->tx_pkt_prepare)
+		return nb_pkts;
+
+	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
+			tx_pkts, nb_pkts);
+}
+
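As a usage sketch for rte_eth_tx_prepare() above, the intended call sequence runs the prepare stage on a burst and hands only the validated prefix to rte_eth_tx_burst(); ownership of the rejected tail is left with the caller:

    #include <rte_ethdev.h>

    static uint16_t
    send_burst(uint8_t port_id, uint16_t queue_id,
                    struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            uint16_t nb_prep;

            /* Fix up checksums and validate offload constraints first. */
            nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

            /* Only the validated prefix is sent; the caller still owns
             * pkts[nb_prep..nb_pkts-1] (the first of which failed, with
             * the cause in rte_errno) and must free or retry them. */
            return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
    }
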
+#else
+
+/*
+ * Native NOOP operation for compilation targets which do not require any
+ * preparation steps, and where a functional NOOP may introduce an unnecessary
+ * performance drop.
+ *
+ * Generally it is not a good idea to turn this on globally, and it should not
+ * be used if the behavior of tx_preparation can change.
+ */
+
+static inline uint16_t
+rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
+		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	return nb_pkts;
+}
+
+#endif
+
 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
 		void *userdata);
@@ -3023,6 +3358,33 @@ void
 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
 		void *userdata);

+/**
+ * Request the driver to free mbufs currently cached by the driver. The
+ * driver will only free the mbuf if it is no longer in use. It is the
+ * application's responsibility to ensure rte_eth_tx_buffer_flush() is
+ * called if needed.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param queue_id
+ *   The index of the transmit queue through which output packets must be
+ *   sent.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param free_cnt
+ *   Maximum number of packets to free. Use 0 to indicate all possible packets
+ *   should be freed. Note that a packet may be using multiple mbufs.
+ * @return
+ *   Failure: < 0
+ *     -ENODEV: Invalid interface
+ *     -ENOTSUP: Driver does not support function
+ *   Success: >= 0
+ *     0-n: Number of packets freed. More packets may still remain in the ring
+ *     that are in use.
+ */
+int
+rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt);
+
 /**
  * The eth device event type for interrupt, and maybe others in the future.
  */
@@ -3034,6 +3396,8 @@ enum rte_eth_event_type {
 	RTE_ETH_EVENT_INTR_RESET,
 			/**< reset interrupt event, sent to VF on PF reset */
 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
+	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
+	RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
 };

@@ -3112,7 +3476,7 @@ void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 /**
  * When there is no rx packet coming in Rx Queue for a long time, we can
  * sleep lcore related to RX Queue for power saving, and enable rx interrupt
- * to be triggered when rx packect arrives.
+ * to be triggered when an Rx packet arrives.
  *
  * The rte_eth_dev_rx_intr_enable() function enables rx queue
  * interrupt on specific rx queue of a port.
@@ -3403,93 +3767,6 @@ int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
  */
 int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);

- /**
-  * Set RX L2 Filtering mode of a VF of an Ethernet device.
-  *
-  * @param port
-  *   The port identifier of the Ethernet device.
-  * @param vf
-  *   VF id.
-  * @param rx_mode
-  *   The RX mode mask, which is one or more of accepting Untagged Packets,
-  *   packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
-  *   ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
-  *   ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
-  *   in rx_mode.
-  * @param on
-  *   1 - Enable a VF RX mode.
-  *   0 - Disable a VF RX mode.
-  * @return
-  *   - (0) if successful.
-  *   - (-ENOTSUP) if hardware doesn't support.
-  *   - (-ENOTSUP) if hardware doesn't support.
-  *   - (-EINVAL) if bad parameter.
-  */
-int rte_eth_dev_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mode,
-				uint8_t on);
-
-/**
-* Enable or disable a VF traffic transmit of the Ethernet device.
-*
-* @param port
-*   The port identifier of the Ethernet device.
-* @param vf
-*   VF id.
-* @param on
-*    1 - Enable a VF traffic transmit.
-*    0 - Disable a VF traffic transmit.
-* @return
-*   - (0) if successful.
-* - (-ENODEV) if *port_id* invalid. -* - (-ENOTSUP) if hardware doesn't support. -* - (-EINVAL) if bad parameter. -*/ -int -rte_eth_dev_set_vf_tx(uint8_t port,uint16_t vf, uint8_t on); - -/** -* Enable or disable a VF traffic receive of an Ethernet device. -* -* @param port -* The port identifier of the Ethernet device. -* @param vf -* VF id. -* @param on -* 1 - Enable a VF traffic receive. -* 0 - Disable a VF traffic receive. -* @return -* - (0) if successful. -* - (-ENOTSUP) if hardware doesn't support. -* - (-ENODEV) if *port_id* invalid. -* - (-EINVAL) if bad parameter. -*/ -int -rte_eth_dev_set_vf_rx(uint8_t port,uint16_t vf, uint8_t on); - -/** -* Enable/Disable hardware VF VLAN filtering by an Ethernet device of -* received VLAN packets tagged with a given VLAN Tag Identifier. -* -* @param port id -* The port identifier of the Ethernet device. -* @param vlan_id -* The VLAN Tag Identifier whose filtering must be enabled or disabled. -* @param vf_mask -* Bitmap listing which VFs participate in the VLAN filtering. -* @param vlan_on -* 1 - Enable VFs VLAN filtering. -* 0 - Disable VFs VLAN filtering. -* @return -* - (0) if successful. -* - (-ENOTSUP) if hardware doesn't support. -* - (-ENODEV) if *port_id* invalid. -* - (-EINVAL) if bad parameter. -*/ -int -rte_eth_dev_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, - uint64_t vf_mask, - uint8_t vlan_on); - /** * Set a traffic mirroring rule on an Ethernet device * @@ -3550,26 +3827,6 @@ int rte_eth_mirror_rule_reset(uint8_t port_id, int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx, uint16_t tx_rate); -/** - * Set the rate limitation for a vf on an Ethernet device. - * - * @param port_id - * The port identifier of the Ethernet device. - * @param vf - * VF id. - * @param tx_rate - * The tx rate allocated from the total link speed for this VF id. - * @param q_msk - * The queue mask which need to set the rate. - * @return - * - (0) if successful. - * - (-ENOTSUP) if hardware doesn't support this feature. - * - (-ENODEV) if *port_id* invalid. - * - (-EINVAL) if bad parameter. - */ -int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, - uint16_t tx_rate, uint64_t q_msk); - /** * Initialize bypass logic. This function needs to be called before * executing any other bypass API. @@ -3773,7 +4030,7 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id, * The packets with this UDP port will be identified as this type of tunnel. * Before enabling any offloading function for a tunnel, users can call this API * to change or add more UDP port for the tunnel. So the offloading function - * can take effect on the packets with the sepcific UDP port. + * can take effect on the packets with the specific UDP port. * * @param port_id * The port identifier of the Ethernet device. @@ -3795,7 +4052,7 @@ rte_eth_dev_udp_tunnel_port_add(uint8_t port_id, * any more. * Before enabling any offloading function for a tunnel, users can call this API * to delete a UDP port for the tunnel. So the offloading function will not take - * effect on the packets with the sepcific UDP port. + * effect on the packets with the specific UDP port. * * @param port_id * The port identifier of the Ethernet device. @@ -3889,31 +4146,31 @@ int rte_eth_dev_get_dcb_info(uint8_t port_id, void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param); -/* -* Add a callback that must be called first on packet RX on a given port -* and queue. 
-* -* This API configures a first function to be called for each burst of -* packets received on a given NIC port queue. The return value is a pointer -* that can be used to later remove the callback using -* rte_eth_remove_rx_callback(). -* -* Multiple functions are called in the order that they are added. -* -* @param port_id -* The port identifier of the Ethernet device. -* @param queue_id -* The queue on the Ethernet device on which the callback is to be added. -* @param fn -* The callback function -* @param user_param -* A generic pointer parameter which will be passed to each invocation of the -* callback function on this port and queue. -* -* @return -* NULL on error. -* On success, a pointer value which can later be used to remove the callback. -*/ +/** + * Add a callback that must be called first on packet RX on a given port + * and queue. + * + * This API configures a first function to be called for each burst of + * packets received on a given NIC port queue. The return value is a pointer + * that can be used to later remove the callback using + * rte_eth_remove_rx_callback(). + * + * Multiple functions are called in the order that they are added. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The queue on the Ethernet device on which the callback is to be added. + * @param fn + * The callback function + * @param user_param + * A generic pointer parameter which will be passed to each invocation of the + * callback function on this port and queue. + * + * @return + * NULL on error. + * On success, a pointer value which can later be used to remove the callback. + */ void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param); @@ -4250,20 +4507,6 @@ int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time); */ int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time); -/** - * Copy pci device info to the Ethernet device data. - * - * @param eth_dev - * The *eth_dev* pointer is the address of the *rte_eth_dev* structure. - * @param pci_dev - * The *pci_dev* pointer is the address of the *rte_pci_device* structure. - * - * @return - * - 0 on success, negative on error - */ -void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, - struct rte_pci_device *pci_dev); - /** * Create memzone for HW rings. * malloc can't be used as the physical address is needed. @@ -4336,7 +4579,7 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id, uint8_t en); /** -* Get the port id from pci adrress or device name +* Get the port id from pci address or device name * Ex: 0000:2:00.0 or vdev name net_pcap0 * * @param name @@ -4364,21 +4607,6 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id); int rte_eth_dev_get_name_by_port(uint8_t port_id, char *name); -/** - * @internal - * Wrapper for use by pci drivers as a .probe function to attach to a ethdev - * interface. - */ -int rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv, - struct rte_pci_device *pci_dev); - -/** - * @internal - * Wrapper for use by pci drivers as a .remove function to detach a ethdev - * interface. - */ -int rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev); - #ifdef __cplusplus } #endif diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h new file mode 100644 index 00000000..d3bc03cf --- /dev/null +++ b/lib/librte_ether/rte_ethdev_pci.h @@ -0,0 +1,193 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2017 Brocade Communications Systems, Inc. 
+ * Author: Jan Blunck
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETHDEV_PCI_H_
+#define _RTE_ETHDEV_PCI_H_
+
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_ethdev.h>
+
+/**
+ * Copy pci device info to the Ethernet device data.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
+ * @param pci_dev
+ * The *pci_dev* pointer is the address of the *rte_pci_device* structure.
+ */
+static inline void
+rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
+	struct rte_pci_device *pci_dev)
+{
+	if ((eth_dev == NULL) || (pci_dev == NULL)) {
+		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
+				eth_dev, pci_dev);
+		return;
+	}
+
+	eth_dev->intr_handle = &pci_dev->intr_handle;
+
+	eth_dev->data->dev_flags = 0;
+	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_RMV)
+		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_RMV;
+
+	eth_dev->data->kdrv = pci_dev->kdrv;
+	eth_dev->data->numa_node = pci_dev->device.numa_node;
+	eth_dev->data->drv_name = pci_dev->driver->driver.name;
+}
+
+/**
+ * @internal
+ * Allocates a new ethdev slot for an ethernet device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param dev
+ *	Pointer to the PCI device
+ *
+ * @param private_data_size
+ *	Size of private data structure
+ *
+ * @return
+ *	A pointer to a rte_eth_dev or NULL if allocation failed.
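For context, a sketch of how a PCI PMD might use the allocation helper documented here, via the rte_eth_dev_pci_generic_probe() wrapper defined later in this header; the my_* names and the private structure are hypothetical:

    #include <rte_ethdev_pci.h>

    struct my_adapter {
            int dummy; /* hypothetical per-device private state */
    };

    static int
    my_eth_dev_init(struct rte_eth_dev *eth_dev)
    {
            /* A real driver would set eth_dev->dev_ops and the Rx/Tx
             * burst functions here. */
            return 0;
    }

    static int
    my_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                    struct rte_pci_device *pci_dev)
    {
            return rte_eth_dev_pci_generic_probe(pci_dev,
                    sizeof(struct my_adapter), my_eth_dev_init);
    }
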
+ */ +static inline struct rte_eth_dev * +rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size) +{ + struct rte_eth_dev *eth_dev; + const char *name; + + if (!dev) + return NULL; + + name = dev->device.name; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) + return NULL; + + if (private_data_size) { + eth_dev->data->dev_private = rte_zmalloc_socket(name, + private_data_size, RTE_CACHE_LINE_SIZE, + dev->device.numa_node); + if (!eth_dev->data->dev_private) { + rte_eth_dev_release_port(eth_dev); + return NULL; + } + } + } else { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) + return NULL; + } + + eth_dev->device = &dev->device; + eth_dev->intr_handle = &dev->intr_handle; + rte_eth_copy_pci_info(eth_dev, dev); + return eth_dev; +} + +static inline void +rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev) +{ + /* free ether device */ + rte_eth_dev_release_port(eth_dev); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_free(eth_dev->data->dev_private); + + eth_dev->data->dev_private = NULL; + + eth_dev->device = NULL; + eth_dev->intr_handle = NULL; +} + +typedef int (*eth_dev_pci_callback_t)(struct rte_eth_dev *eth_dev); + +/** + * @internal + * Wrapper for use by pci drivers in a .probe function to attach to a ethdev + * interface. + */ +static inline int +rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev, + size_t private_data_size, eth_dev_pci_callback_t dev_init) +{ + struct rte_eth_dev *eth_dev; + int ret; + + eth_dev = rte_eth_dev_pci_allocate(pci_dev, private_data_size); + if (!eth_dev) + return -ENOMEM; + + RTE_FUNC_PTR_OR_ERR_RET(*dev_init, -EINVAL); + ret = dev_init(eth_dev); + if (ret) + rte_eth_dev_pci_release(eth_dev); + + return ret; +} + +/** + * @internal + * Wrapper for use by pci drivers in a .remove function to detach a ethdev + * interface. + */ +static inline int +rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev, + eth_dev_pci_callback_t dev_uninit) +{ + struct rte_eth_dev *eth_dev; + int ret; + + eth_dev = rte_eth_dev_allocated(pci_dev->device.name); + if (!eth_dev) + return -ENODEV; + + if (dev_uninit) { + ret = dev_uninit(eth_dev); + if (ret) + return ret; + } + + rte_eth_dev_pci_release(eth_dev); + return 0; +} + +#endif /* _RTE_ETHDEV_PCI_H_ */ diff --git a/lib/librte_ether/rte_ethdev_vdev.h b/lib/librte_ether/rte_ethdev_vdev.h new file mode 100644 index 00000000..fa2cb61e --- /dev/null +++ b/lib/librte_ether/rte_ethdev_vdev.h @@ -0,0 +1,84 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2017 Brocade Communications Systems, Inc. + * Author: Jan Blunck + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETHDEV_VDEV_H_
+#define _RTE_ETHDEV_VDEV_H_
+
+#include <rte_malloc.h>
+#include <rte_vdev.h>
+#include <rte_ethdev.h>
+
+/**
+ * @internal
+ * Allocates a new ethdev slot for an ethernet device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param dev
+ *	Pointer to virtual device
+ *
+ * @param private_data_size
+ *	Size of private data structure
+ *
+ * @return
+ *	A pointer to a rte_eth_dev or NULL if allocation failed.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_vdev_device *dev, size_t private_data_size)
+{
+	struct rte_eth_dev *eth_dev;
+	const char *name = rte_vdev_device_name(dev);
+
+	eth_dev = rte_eth_dev_allocate(name);
+	if (!eth_dev)
+		return NULL;
+
+	if (private_data_size) {
+		eth_dev->data->dev_private = rte_zmalloc_socket(name,
+			private_data_size, RTE_CACHE_LINE_SIZE,
+			dev->device.numa_node);
+		if (!eth_dev->data->dev_private) {
+			rte_eth_dev_release_port(eth_dev);
+			return NULL;
+		}
+	}
+
+	eth_dev->device = &dev->device;
+	eth_dev->intr_handle = NULL;
+
+	eth_dev->data->kdrv = RTE_KDRV_NONE;
+	eth_dev->data->numa_node = dev->device.numa_node;
+	eth_dev->data->drv_name = dev->device.driver->name;
+	return eth_dev;
+}
+
+#endif /* _RTE_ETHDEV_VDEV_H_ */
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map
index fd622635..d6726bb1 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ether_version.map
@@ -7,7 +7,6 @@ DPDK_2.2 {
 	rte_eth_allmulticast_disable;
 	rte_eth_allmulticast_enable;
 	rte_eth_allmulticast_get;
-	rte_eth_copy_pci_info;
 	rte_eth_dev_allocate;
 	rte_eth_dev_allocated;
 	rte_eth_dev_attach;
@@ -60,10 +59,6 @@ DPDK_2.2 {
 	rte_eth_dev_set_mtu;
 	rte_eth_dev_set_rx_queue_stats_mapping;
 	rte_eth_dev_set_tx_queue_stats_mapping;
-	rte_eth_dev_set_vf_rx;
-	rte_eth_dev_set_vf_rxmode;
-	rte_eth_dev_set_vf_tx;
-	rte_eth_dev_set_vf_vlan_filter;
 	rte_eth_dev_set_vlan_offload;
 	rte_eth_dev_set_vlan_pvid;
 	rte_eth_dev_set_vlan_strip_on_queue;
@@ -93,7 +88,6 @@ DPDK_2.2 {
 	rte_eth_rx_queue_info_get;
 	rte_eth_rx_queue_setup;
 	rte_eth_set_queue_rate_limit;
-	rte_eth_set_vf_rate_limit;
 	rte_eth_stats;
 	rte_eth_stats_get;
 	rte_eth_stats_reset;
@@ -139,10 +133,26 @@ DPDK_16.07 {

 } DPDK_16.04;

-DPDK_16.11 {
+DPDK_17.02 {
 	global:

-	rte_eth_dev_pci_probe;
-	rte_eth_dev_pci_remove;
+	_rte_eth_dev_reset;
+	rte_eth_dev_fw_version_get;
+	rte_flow_create;
+	rte_flow_destroy;
+	rte_flow_flush;
+	rte_flow_query;
+	rte_flow_validate;

 } DPDK_16.07;
+
+DPDK_17.05 {
+	global:
+
+	rte_eth_dev_attach_secondary;
+	rte_eth_find_next;
+	rte_eth_xstats_get_by_id;
+	rte_eth_xstats_get_id_by_name;
+	rte_eth_xstats_get_names_by_id;
+
+} DPDK_17.02;
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c
new file mode 100644
index 00000000..aaa70d68
--- /dev/null
+++ b/lib/librte_ether/rte_flow.c
@@ -0,0 +1,159 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of 6WIND S.A. nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include <rte_branch_prediction.h>
+#include "rte_ethdev.h"
+#include "rte_flow_driver.h"
+#include "rte_flow.h"
+
+/* Get generic flow operations structure from a port. */
+const struct rte_flow_ops *
+rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct rte_flow_ops *ops;
+	int code;
+
+	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
+		code = ENODEV;
+	else if (unlikely(!dev->dev_ops->filter_ctrl ||
+			  dev->dev_ops->filter_ctrl(dev,
+						    RTE_ETH_FILTER_GENERIC,
+						    RTE_ETH_FILTER_GET,
+						    &ops) ||
+			  !ops))
+		code = ENOSYS;
+	else
+		return ops;
+	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			   NULL, rte_strerror(code));
+	return NULL;
+}
+
+/* Check whether a flow rule can be created on a given port. */
+int
+rte_flow_validate(uint8_t port_id,
+		  const struct rte_flow_attr *attr,
+		  const struct rte_flow_item pattern[],
+		  const struct rte_flow_action actions[],
+		  struct rte_flow_error *error)
+{
+	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+	if (unlikely(!ops))
+		return -rte_errno;
+	if (likely(!!ops->validate))
+		return ops->validate(dev, attr, pattern, actions, error);
+	return -rte_flow_error_set(error, ENOSYS,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, rte_strerror(ENOSYS));
+}
+
+/* Create a flow rule on a given port.
*/ +struct rte_flow * +rte_flow_create(uint8_t port_id, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); + + if (unlikely(!ops)) + return NULL; + if (likely(!!ops->create)) + return ops->create(dev, attr, pattern, actions, error); + rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOSYS)); + return NULL; +} + +/* Destroy a flow rule on a given port. */ +int +rte_flow_destroy(uint8_t port_id, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); + + if (unlikely(!ops)) + return -rte_errno; + if (likely(!!ops->destroy)) + return ops->destroy(dev, flow, error); + return -rte_flow_error_set(error, ENOSYS, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOSYS)); +} + +/* Destroy all flow rules associated with a port. */ +int +rte_flow_flush(uint8_t port_id, + struct rte_flow_error *error) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); + + if (unlikely(!ops)) + return -rte_errno; + if (likely(!!ops->flush)) + return ops->flush(dev, error); + return -rte_flow_error_set(error, ENOSYS, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOSYS)); +} + +/* Query an existing flow rule. */ +int +rte_flow_query(uint8_t port_id, + struct rte_flow *flow, + enum rte_flow_action_type action, + void *data, + struct rte_flow_error *error) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); + + if (!ops) + return -rte_errno; + if (likely(!!ops->query)) + return ops->query(dev, flow, action, data, error); + return -rte_flow_error_set(error, ENOSYS, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOSYS)); +} diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h new file mode 100644 index 00000000..c47edbc9 --- /dev/null +++ b/lib/librte_ether/rte_flow.h @@ -0,0 +1,1198 @@ +/*- + * BSD LICENSE + * + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_FLOW_H_
+#define RTE_FLOW_H_
+
+/**
+ * @file
+ * RTE generic flow API
+ *
+ * This interface provides the ability to program packet matching and
+ * associated actions in hardware through flow rules.
+ */
+
+#include <stdint.h>
+#include <rte_byteorder.h>
+#include <rte_ether.h>
+#include <rte_icmp.h>
+#include <rte_ip.h>
+#include <rte_sctp.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+/**
+ * Flow rule attributes.
+ *
+ * Priorities are set on two levels: per group and per rule within groups.
+ *
+ * Lower values denote higher priority; the highest priority for both levels
+ * is 0, so that a rule with priority 0 in group 8 is always matched after a
+ * rule with priority 8 in group 0.
+ *
+ * Although optional, applications are encouraged to group similar rules as
+ * much as possible to fully take advantage of hardware capabilities
+ * (e.g. optimized matching) and work around limitations (e.g. a single
+ * pattern type possibly allowed in a given group).
+ *
+ * Group and priority levels are arbitrary and up to the application; they
+ * do not need to be contiguous nor start from 0. However, the maximum number
+ * varies between devices and may be affected by existing flow rules.
+ *
+ * If a packet is matched by several rules of a given group for a given
+ * priority level, the outcome is undefined. It can take any path, may be
+ * duplicated or even cause unrecoverable errors.
+ *
+ * Note that support for more than a single group and priority level is not
+ * guaranteed.
+ *
+ * Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
+ *
+ * Several pattern items and actions are valid and can be used in both
+ * directions. Those valid for only one direction are described as such.
+ *
+ * At least one direction must be specified.
+ *
+ * Specifying both directions at once for a given rule is not recommended
+ * but may be valid in a few cases (e.g. shared counter).
+ */
+struct rte_flow_attr {
+	uint32_t group; /**< Priority group. */
+	uint32_t priority; /**< Priority level within group. */
+	uint32_t ingress:1; /**< Rule applies to ingress traffic. */
+	uint32_t egress:1; /**< Rule applies to egress traffic. */
+	uint32_t reserved:30; /**< Reserved, must be zero. */
+};
+
+/**
+ * Matching pattern item types.
+ *
+ * Pattern items fall in two categories:
+ *
+ * - Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
+ *   IPV6, ICMP, UDP, TCP, SCTP, VXLAN and so on), usually associated with a
+ *   specification structure. These must be stacked in the same order as the
+ *   protocol layers to match, starting from the lowest.
+ *
+ * - Matching meta-data or affecting pattern processing (END, VOID, INVERT,
+ *   PF, VF, PORT and so on), often without a specification structure. Since
+ *   they do not match packet contents, these can be specified anywhere
+ *   within item lists without affecting others.
+ *
+ * See the description of individual types for more information. Those
+ * marked with [META] fall into the second category.
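For orientation, a minimal sketch of rule attributes selecting ingress traffic at the highest priority of group 0 (the designated initializers leave the reserved bits zero, as required):

    static const struct rte_flow_attr attr = {
            .group = 0,    /* first group */
            .priority = 0, /* highest priority within the group */
            .ingress = 1,  /* match inbound traffic only */
    };
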
+ */ +enum rte_flow_item_type { + /** + * [META] + * + * End marker for item lists. Prevents further processing of items, + * thereby ending the pattern. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_END, + + /** + * [META] + * + * Used as a placeholder for convenience. It is ignored and simply + * discarded by PMDs. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_VOID, + + /** + * [META] + * + * Inverted matching, i.e. process packets that do not match the + * pattern. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_INVERT, + + /** + * Matches any protocol in place of the current layer, a single ANY + * may also stand for several protocol layers. + * + * See struct rte_flow_item_any. + */ + RTE_FLOW_ITEM_TYPE_ANY, + + /** + * [META] + * + * Matches packets addressed to the physical function of the device. + * + * If the underlying device function differs from the one that would + * normally receive the matched traffic, specifying this item + * prevents it from reaching that device unless the flow rule + * contains a PF action. Packets are not duplicated between device + * instances by default. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_PF, + + /** + * [META] + * + * Matches packets addressed to a virtual function ID of the device. + * + * If the underlying device function differs from the one that would + * normally receive the matched traffic, specifying this item + * prevents it from reaching that device unless the flow rule + * contains a VF action. Packets are not duplicated between device + * instances by default. + * + * See struct rte_flow_item_vf. + */ + RTE_FLOW_ITEM_TYPE_VF, + + /** + * [META] + * + * Matches packets coming from the specified physical port of the + * underlying device. + * + * The first PORT item overrides the physical port normally + * associated with the specified DPDK input port (port_id). This + * item can be provided several times to match additional physical + * ports. + * + * See struct rte_flow_item_port. + */ + RTE_FLOW_ITEM_TYPE_PORT, + + /** + * Matches a byte string of a given length at a given offset. + * + * See struct rte_flow_item_raw. + */ + RTE_FLOW_ITEM_TYPE_RAW, + + /** + * Matches an Ethernet header. + * + * See struct rte_flow_item_eth. + */ + RTE_FLOW_ITEM_TYPE_ETH, + + /** + * Matches an 802.1Q/ad VLAN tag. + * + * See struct rte_flow_item_vlan. + */ + RTE_FLOW_ITEM_TYPE_VLAN, + + /** + * Matches an IPv4 header. + * + * See struct rte_flow_item_ipv4. + */ + RTE_FLOW_ITEM_TYPE_IPV4, + + /** + * Matches an IPv6 header. + * + * See struct rte_flow_item_ipv6. + */ + RTE_FLOW_ITEM_TYPE_IPV6, + + /** + * Matches an ICMP header. + * + * See struct rte_flow_item_icmp. + */ + RTE_FLOW_ITEM_TYPE_ICMP, + + /** + * Matches a UDP header. + * + * See struct rte_flow_item_udp. + */ + RTE_FLOW_ITEM_TYPE_UDP, + + /** + * Matches a TCP header. + * + * See struct rte_flow_item_tcp. + */ + RTE_FLOW_ITEM_TYPE_TCP, + + /** + * Matches a SCTP header. + * + * See struct rte_flow_item_sctp. + */ + RTE_FLOW_ITEM_TYPE_SCTP, + + /** + * Matches a VXLAN header. + * + * See struct rte_flow_item_vxlan. + */ + RTE_FLOW_ITEM_TYPE_VXLAN, + + /** + * Matches a E_TAG header. + * + * See struct rte_flow_item_e_tag. + */ + RTE_FLOW_ITEM_TYPE_E_TAG, + + /** + * Matches a NVGRE header. + * + * See struct rte_flow_item_nvgre. + */ + RTE_FLOW_ITEM_TYPE_NVGRE, + + /** + * Matches a MPLS header. + * + * See struct rte_flow_item_mpls. 
+ */ + RTE_FLOW_ITEM_TYPE_MPLS, + + /** + * Matches a GRE header. + * + * See struct rte_flow_item_gre. + */ + RTE_FLOW_ITEM_TYPE_GRE, +}; + +/** + * RTE_FLOW_ITEM_TYPE_ANY + * + * Matches any protocol in place of the current layer, a single ANY may also + * stand for several protocol layers. + * + * This is usually specified as the first pattern item when looking for a + * protocol anywhere in a packet. + * + * A zeroed mask stands for any number of layers. + */ +struct rte_flow_item_any { + uint32_t num; /**< Number of layers covered. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_ANY. */ +#ifndef __cplusplus +static const struct rte_flow_item_any rte_flow_item_any_mask = { + .num = 0x00000000, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_VF + * + * Matches packets addressed to a virtual function ID of the device. + * + * If the underlying device function differs from the one that would + * normally receive the matched traffic, specifying this item prevents it + * from reaching that device unless the flow rule contains a VF + * action. Packets are not duplicated between device instances by default. + * + * - Likely to return an error or never match any traffic if this causes a + * VF device to match traffic addressed to a different VF. + * - Can be specified multiple times to match traffic addressed to several + * VF IDs. + * - Can be combined with a PF item to match both PF and VF traffic. + * + * A zeroed mask can be used to match any VF ID. + */ +struct rte_flow_item_vf { + uint32_t id; /**< Destination VF ID. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_VF. */ +#ifndef __cplusplus +static const struct rte_flow_item_vf rte_flow_item_vf_mask = { + .id = 0x00000000, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_PORT + * + * Matches packets coming from the specified physical port of the underlying + * device. + * + * The first PORT item overrides the physical port normally associated with + * the specified DPDK input port (port_id). This item can be provided + * several times to match additional physical ports. + * + * Note that physical ports are not necessarily tied to DPDK input ports + * (port_id) when those are not under DPDK control. Possible values are + * specific to each device, they are not necessarily indexed from zero and + * may not be contiguous. + * + * As a device property, the list of allowed values as well as the value + * associated with a port_id should be retrieved by other means. + * + * A zeroed mask can be used to match any port index. + */ +struct rte_flow_item_port { + uint32_t index; /**< Physical port index. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_PORT. */ +#ifndef __cplusplus +static const struct rte_flow_item_port rte_flow_item_port_mask = { + .index = 0x00000000, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_RAW + * + * Matches a byte string of a given length at a given offset. + * + * Offset is either absolute (using the start of the packet) or relative to + * the end of the previous matched item in the stack, in which case negative + * values are allowed. + * + * If search is enabled, offset is used as the starting point. The search + * area can be delimited by setting limit to a nonzero value, which is the + * maximum number of bytes after offset where the pattern may start. + * + * Matching a zero-length pattern is allowed, doing so resets the relative + * offset for subsequent items. + * + * This type does not support ranges (struct rte_flow_item.last). 
+ */ +struct rte_flow_item_raw { + uint32_t relative:1; /**< Look for pattern after the previous item. */ + uint32_t search:1; /**< Search pattern from offset (see also limit). */ + uint32_t reserved:30; /**< Reserved, must be set to zero. */ + int32_t offset; /**< Absolute or relative offset for pattern. */ + uint16_t limit; /**< Search area limit for start of pattern. */ + uint16_t length; /**< Pattern length. */ + uint8_t pattern[]; /**< Byte string to look for. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */ +#ifndef __cplusplus +static const struct rte_flow_item_raw rte_flow_item_raw_mask = { + .relative = 1, + .search = 1, + .reserved = 0x3fffffff, + .offset = 0xffffffff, + .limit = 0xffff, + .length = 0xffff, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_ETH + * + * Matches an Ethernet header. + */ +struct rte_flow_item_eth { + struct ether_addr dst; /**< Destination MAC. */ + struct ether_addr src; /**< Source MAC. */ + uint16_t type; /**< EtherType. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */ +#ifndef __cplusplus +static const struct rte_flow_item_eth rte_flow_item_eth_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .type = 0x0000, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_VLAN + * + * Matches an 802.1Q/ad VLAN tag. + * + * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or + * RTE_FLOW_ITEM_TYPE_VLAN. + */ +struct rte_flow_item_vlan { + uint16_t tpid; /**< Tag protocol identifier. */ + uint16_t tci; /**< Tag control information. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */ +#ifndef __cplusplus +static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = { + .tpid = 0x0000, + .tci = 0xffff, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_IPV4 + * + * Matches an IPv4 header. + * + * Note: IPv4 options are handled by dedicated pattern items. + */ +struct rte_flow_item_ipv4 { + struct ipv4_hdr hdr; /**< IPv4 header definition. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */ +#ifndef __cplusplus +static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = { + .hdr = { + .src_addr = 0xffffffff, + .dst_addr = 0xffffffff, + }, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_IPV6. + * + * Matches an IPv6 header. + * + * Note: IPv6 options are handled by dedicated pattern items. + */ +struct rte_flow_item_ipv6 { + struct ipv6_hdr hdr; /**< IPv6 header definition. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */ +#ifndef __cplusplus +static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + }, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_ICMP. + * + * Matches an ICMP header. + */ +struct rte_flow_item_icmp { + struct icmp_hdr hdr; /**< ICMP header definition. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP. */ +#ifndef __cplusplus +static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = { + .hdr = { + .icmp_type = 0xff, + .icmp_code = 0xff, + }, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_UDP. + * + * Matches a UDP header. + */ +struct rte_flow_item_udp { + struct udp_hdr hdr; /**< UDP header definition. */ +}; + +/** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */ +#ifndef __cplusplus +static const struct rte_flow_item_udp rte_flow_item_udp_mask = { + .hdr = { + .src_port = 0xffff, + .dst_port = 0xffff, + }, +}; +#endif + +/** + * RTE_FLOW_ITEM_TYPE_TCP. 
+ *
+ * Matches a TCP header.
+ */
+struct rte_flow_item_tcp {
+	struct tcp_hdr hdr; /**< TCP header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
+	.hdr = {
+		.src_port = 0xffff,
+		.dst_port = 0xffff,
+	},
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_SCTP.
+ *
+ * Matches an SCTP header.
+ */
+struct rte_flow_item_sctp {
+	struct sctp_hdr hdr; /**< SCTP header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_SCTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
+	.hdr = {
+		.src_port = 0xffff,
+		.dst_port = 0xffff,
+	},
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_VXLAN.
+ *
+ * Matches a VXLAN header (RFC 7348).
+ */
+struct rte_flow_item_vxlan {
+	uint8_t flags; /**< Normally 0x08 (I flag). */
+	uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
+	uint8_t vni[3]; /**< VXLAN identifier. */
+	uint8_t rsvd1; /**< Reserved, normally 0x00. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN. */
+#ifndef __cplusplus
+static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
+	.vni = "\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_E_TAG.
+ *
+ * Matches an E-Tag header.
+ */
+struct rte_flow_item_e_tag {
+	uint16_t tpid; /**< Tag protocol identifier (0x893F). */
+	/**
+	 * E-Tag control information (E-TCI).
+	 * E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
+	 */
+	uint16_t epcp_edei_in_ecid_b;
+	/** Reserved (2b), GRP (2b), E-CID base (12b). */
+	uint16_t rsvd_grp_ecid_b;
+	uint8_t in_ecid_e; /**< Ingress E-CID ext. */
+	uint8_t ecid_e; /**< E-CID ext. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
+#ifndef __cplusplus
+static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+	.rsvd_grp_ecid_b = 0x3fff,
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+	.rsvd_grp_ecid_b = 0xff3f,
+#else
+#error Unsupported endianness.
+#endif
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_NVGRE.
+ *
+ * Matches an NVGRE header.
+ */
+struct rte_flow_item_nvgre {
+	/**
+	 * Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
+	 * reserved 0 (9b), version (3b).
+	 *
+	 * c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
+	 */
+	uint16_t c_k_s_rsvd0_ver;
+	uint16_t protocol; /**< Protocol type (0x6558). */
+	uint8_t tni[3]; /**< Virtual subnet ID. */
+	uint8_t flow_id; /**< Flow ID. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_NVGRE. */
+#ifndef __cplusplus
+static const struct rte_flow_item_nvgre rte_flow_item_nvgre_mask = {
+	.tni = "\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_MPLS.
+ *
+ * Matches an MPLS header.
+ */
+struct rte_flow_item_mpls {
+	/**
+	 * Label (20b), TC (3b), Bottom of Stack (1b).
+	 */
+	uint8_t label_tc_s[3];
+	uint8_t ttl; /**< Time-to-Live. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_MPLS. */
+#ifndef __cplusplus
+static const struct rte_flow_item_mpls rte_flow_item_mpls_mask = {
+	.label_tc_s = "\xff\xff\xf0",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_GRE.
+ *
+ * Matches a GRE header.
+ */
+struct rte_flow_item_gre {
+	/**
+	 * Checksum (1b), reserved 0 (12b), version (3b).
+	 * Refer to RFC 2784.
+	 */
+	uint16_t c_rsvd0_ver;
+	uint16_t protocol; /**< Protocol type. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GRE. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
+	.protocol = 0xffff,
+};
+#endif
+
+/**
+ * Matching pattern item definition.
+ *
+ * A pattern is formed by stacking items starting from the lowest protocol
+ * layer to match. This stacking restriction does not apply to meta items,
+ * which can be placed anywhere in the stack without affecting the meaning
+ * of the resulting pattern.
+ *
+ * Patterns are terminated by END items.
+ *
+ * The spec field should be a valid pointer to a structure of the related
+ * item type. It may remain unspecified (NULL) in many cases to request
+ * broad (nonspecific) matching. In such cases, last and mask must also be
+ * set to NULL.
+ *
+ * Optionally, last can point to a structure of the same type to define an
+ * inclusive range. This is mostly supported by integer and address fields
+ * and may cause errors otherwise. Fields that do not support ranges must be
+ * set to 0 or to the same value as the corresponding fields in spec.
+ *
+ * Only the fields defined to nonzero values in the default masks (see
+ * rte_flow_item_{name}_mask constants) are considered relevant by
+ * default. This can be overridden by providing a mask structure of the
+ * same type with applicable bits set to one. It can also be used to
+ * partially filter out specific fields (e.g. as an alternative means to
+ * match ranges of IP addresses).
+ *
+ * Mask is a simple bit-mask applied before interpreting the contents of
+ * spec and last, which may yield unexpected results if not used
+ * carefully. For example, if for an IPv4 address field, spec provides
+ * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
+ * effective range becomes 10.1.0.0 to 10.3.255.255.
+ */
+struct rte_flow_item {
+	enum rte_flow_item_type type; /**< Item type. */
+	const void *spec; /**< Pointer to item specification structure. */
+	const void *last; /**< Defines an inclusive range (spec to last). */
+	const void *mask; /**< Bit-mask applied to spec and last. */
+};
+
+/**
+ * Action types.
+ *
+ * Each possible action is represented by a type. Some have associated
+ * configuration structures. Several actions combined in a list can be
+ * assigned to a flow rule. That list is not ordered.
+ *
+ * They fall into three categories:
+ *
+ * - Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
+ *   matched packets from being processed by subsequent flow rules, unless
+ *   overridden with PASSTHRU.
+ *
+ * - Non-terminating actions (PASSTHRU, DUP) that leave matched packets up
+ *   for additional processing by subsequent flow rules.
+ *
+ * - Other non-terminating meta actions that do not affect the fate of
+ *   packets (END, VOID, MARK, FLAG, COUNT).
+ *
+ * When several actions are combined in a flow rule, they should all have
+ * different types (e.g. dropping a packet twice is not possible).
+ *
+ * Only the last action of a given type is taken into account. PMDs still
+ * perform error checking on the entire list.
+ *
+ * Note that PASSTHRU is the only action able to override a terminating
+ * rule.
+ */
+enum rte_flow_action_type {
+	/**
+	 * [META]
+	 *
+	 * End marker for action lists. Prevents further processing of
+	 * actions, thereby ending the list.
+	 *
+	 * No associated configuration structure.
+	 */
+	RTE_FLOW_ACTION_TYPE_END,
+
+	/**
+	 * [META]
+	 *
+	 * Used as a placeholder for convenience. It is ignored and simply
+	 * discarded by PMDs.
+	 *
+	 * No associated configuration structure.
+	 */
+	RTE_FLOW_ACTION_TYPE_VOID,
+
+	/**
+	 * Leaves packets up for additional processing by subsequent flow
+	 * rules. This is the default when a rule does not contain a
+	 * terminating action, but can be specified to force a rule to
+	 * become non-terminating.
+	 *
+	 * No associated configuration structure.
+	 */
+	RTE_FLOW_ACTION_TYPE_PASSTHRU,
+
+	/**
+	 * [META]
+	 *
+	 * Attaches an integer value to packets and sets PKT_RX_FDIR and
+	 * PKT_RX_FDIR_ID mbuf flags.
+	 *
+	 * See struct rte_flow_action_mark.
+	 */
+	RTE_FLOW_ACTION_TYPE_MARK,
+
+	/**
+	 * [META]
+	 *
+	 * Flags packets. Similar to MARK without a specific value; only
+	 * sets the PKT_RX_FDIR mbuf flag.
+	 *
+	 * No associated configuration structure.
+	 */
+	RTE_FLOW_ACTION_TYPE_FLAG,
+
+	/**
+	 * Assigns packets to a given queue index.
+	 *
+	 * See struct rte_flow_action_queue.
+	 */
+	RTE_FLOW_ACTION_TYPE_QUEUE,
+
+	/**
+	 * Drops packets.
+	 *
+	 * PASSTHRU overrides this action if both are specified.
+	 *
+	 * No associated configuration structure.
+	 */
+	RTE_FLOW_ACTION_TYPE_DROP,
+
+	/**
+	 * [META]
+	 *
+	 * Enables counters for this rule.
+	 *
+	 * These counters can be retrieved and reset through rte_flow_query();
+	 * see struct rte_flow_query_count.
+	 *
+	 * No associated configuration structure.
+	 */
+	RTE_FLOW_ACTION_TYPE_COUNT,
+
+	/**
+	 * Duplicates packets to a given queue index.
+	 *
+	 * This is normally combined with QUEUE; however, when used alone,
+	 * it is actually similar to QUEUE + PASSTHRU.
+	 *
+	 * See struct rte_flow_action_dup.
+	 */
+	RTE_FLOW_ACTION_TYPE_DUP,
+
+	/**
+	 * Similar to QUEUE, except RSS is additionally performed on packets
+	 * to spread them among several queues according to the provided
+	 * parameters.
+	 *
+	 * See struct rte_flow_action_rss.
+	 */
+	RTE_FLOW_ACTION_TYPE_RSS,
+
+	/**
+	 * Redirects packets to the physical function (PF) of the current
+	 * device.
+	 *
+	 * No associated configuration structure.
+	 */
+	RTE_FLOW_ACTION_TYPE_PF,
+
+	/**
+	 * Redirects packets to the virtual function (VF) of the current
+	 * device with the specified ID.
+	 *
+	 * See struct rte_flow_action_vf.
+	 */
+	RTE_FLOW_ACTION_TYPE_VF,
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_MARK
+ *
+ * Attaches an integer value to packets and sets PKT_RX_FDIR and
+ * PKT_RX_FDIR_ID mbuf flags.
+ *
+ * This value is arbitrary and application-defined. The maximum allowed
+ * value depends on the underlying implementation. It is returned in the
+ * hash.fdir.hi mbuf field.
+ */
+struct rte_flow_action_mark {
+	uint32_t id; /**< Integer value to return with packets. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_QUEUE
+ *
+ * Assigns packets to a given queue index.
+ *
+ * Terminating by default.
+ */
+struct rte_flow_action_queue {
+	uint16_t index; /**< Queue index to use. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_COUNT (query)
+ *
+ * Query structure to retrieve and reset flow rule counters.
+ */
+struct rte_flow_query_count {
+	uint32_t reset:1; /**< Reset counters after query [in]. */
+	uint32_t hits_set:1; /**< hits field is set [out]. */
+	uint32_t bytes_set:1; /**< bytes field is set [out]. */
+	uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
+	uint64_t hits; /**< Number of hits for this rule [out]. */
+	uint64_t bytes; /**< Number of bytes through this rule [out]. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_DUP
+ *
+ * Duplicates packets to a given queue index.
+ *
+ * This is normally combined with QUEUE; however, when used alone, it is
+ * actually similar to QUEUE + PASSTHRU.
+ *
+ * Non-terminating by default.
+ */
+struct rte_flow_action_dup {
+	uint16_t index; /**< Queue index to duplicate packets to.
*/
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_RSS
+ *
+ * Similar to QUEUE, except RSS is additionally performed on packets to
+ * spread them among several queues according to the provided parameters.
+ *
+ * Note: RSS hash result is stored in the hash.rss mbuf field, which
+ * overlaps hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field
+ * only, both can be requested simultaneously.
+ *
+ * Terminating by default.
+ */
+struct rte_flow_action_rss {
+	const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
+	uint16_t num; /**< Number of entries in queue[]. */
+	uint16_t queue[]; /**< Queue indices to use. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_VF
+ *
+ * Redirects packets to a virtual function (VF) of the current device.
+ *
+ * Packets matched by a VF pattern item can be redirected to their original
+ * VF ID instead of the specified one. This parameter may not be available
+ * and is not guaranteed to work properly if the VF part is matched by a
+ * prior flow rule or if packets are not addressed to a VF in the first
+ * place.
+ *
+ * Terminating by default.
+ */
+struct rte_flow_action_vf {
+	uint32_t original:1; /**< Use original VF ID if possible. */
+	uint32_t reserved:31; /**< Reserved, must be zero. */
+	uint32_t id; /**< VF ID to redirect packets to. */
+};
+
+/**
+ * Definition of a single action.
+ *
+ * A list of actions is terminated by an END action.
+ *
+ * For simple actions without a configuration structure, conf remains NULL.
+ */
+struct rte_flow_action {
+	enum rte_flow_action_type type; /**< Action type. */
+	const void *conf; /**< Pointer to action configuration structure. */
+};
+
+/**
+ * Opaque type returned after successfully creating a flow.
+ *
+ * This handle can be used to manage and query the related flow (e.g. to
+ * destroy it or retrieve counters).
+ */
+struct rte_flow;
+
+/**
+ * Verbose error types.
+ *
+ * Most of them provide the type of the object referenced by struct
+ * rte_flow_error.cause.
+ */
+enum rte_flow_error_type {
+	RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
+	RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
+	RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
+	RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
+	RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
+	RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
+	RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
+	RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
+	RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
+	RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
+	RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
+	RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
+};
+
+/**
+ * Verbose error structure definition.
+ *
+ * This object is normally allocated by applications and set by PMDs. The
+ * message points to a constant string, which does not need to be freed by
+ * the application; however, its pointer can be considered valid only as
+ * long as its associated DPDK port remains configured. Closing the
+ * underlying device or unloading the PMD invalidates it.
+ *
+ * Both cause and message may be NULL regardless of the error type.
+ */
+struct rte_flow_error {
+	enum rte_flow_error_type type; /**< Cause field and error types. */
+	const void *cause; /**< Object responsible for the error. */
+	const char *message; /**< Human-readable error message. */
+};
+
+/**
+ * Check whether a flow rule can be created on a given port.
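+ *
+ * As a hedged sketch (the queue index is arbitrary and port_id is assumed
+ * to be a valid, configured port), a rule directing all UDP over IPv4
+ * traffic to queue 1 could be checked as follows:
+ *
+ * \code
+ * struct rte_flow_attr attr = { .ingress = 1 };
+ * struct rte_flow_item pattern[] = {
+ *	{ .type = RTE_FLOW_ITEM_TYPE_ETH }, // NULL spec: broad match
+ *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *	{ .type = RTE_FLOW_ITEM_TYPE_END },
+ * };
+ * struct rte_flow_action_queue queue = { .index = 1 };
+ * struct rte_flow_action actions[] = {
+ *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *	{ .type = RTE_FLOW_ACTION_TYPE_END },
+ * };
+ * struct rte_flow_error error;
+ *
+ * if (rte_flow_validate(port_id, &attr, pattern, actions, &error))
+ *	printf("rule rejected: %s\n", // illustration only
+ *	       error.message ? error.message : "(no message)");
+ * \endcode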
+ *
+ * The flow rule is validated for correctness and to determine whether it
+ * could be accepted by the device given sufficient resources. The rule is
+ * checked against the current device mode and queue configuration. The flow
+ * rule may also optionally be validated against existing flow rules and
+ * device resources. This function has no effect on the target device.
+ *
+ * The returned value is guaranteed to remain valid only as long as no
+ * successful calls to rte_flow_create() or rte_flow_destroy() are made in
+ * the meantime and no device parameter affecting flow rules in any way is
+ * modified, due to possible collisions or resource limitations (although in
+ * such cases EINVAL should not be returned).
+ *
+ * @param port_id
+ *   Port identifier of Ethernet device.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   0 if flow rule is valid and can be created. A negative errno value
+ *   otherwise (rte_errno is also set); the following errors are defined:
+ *
+ *   -ENOSYS: underlying device does not support this functionality.
+ *
+ *   -EINVAL: unknown or invalid rule specification.
+ *
+ *   -ENOTSUP: valid but unsupported rule specification (e.g. partial
+ *   bit-masks are unsupported).
+ *
+ *   -EEXIST: collision with an existing rule. Only returned if the device
+ *   supports flow rule collision checking and there was a flow rule
+ *   collision. Not receiving this return code is no guarantee that creating
+ *   the rule will not fail due to a collision.
+ *
+ *   -ENOMEM: not enough memory to execute the function, or a resource
+ *   limitation on the device if it supports resource validation.
+ *
+ *   -EBUSY: action cannot be performed due to busy device resources; it may
+ *   succeed if the affected queues or even the entire port are in a stopped
+ *   state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
+ */
+int
+rte_flow_validate(uint8_t port_id,
+		  const struct rte_flow_attr *attr,
+		  const struct rte_flow_item pattern[],
+		  const struct rte_flow_action actions[],
+		  struct rte_flow_error *error);
+
+/**
+ * Create a flow rule on a given port.
+ *
+ * @param port_id
+ *   Port identifier of Ethernet device.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   A valid handle in case of success, NULL otherwise and rte_errno is set
+ *   to the positive version of one of the error codes defined for
+ *   rte_flow_validate().
+ */
+struct rte_flow *
+rte_flow_create(uint8_t port_id,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+
+/**
+ * Destroy a flow rule on a given port.
+ *
+ * Failure to destroy a flow rule handle may occur when other flow rules
+ * depend on it, and destroying it would result in an inconsistent state.
+ *
+ * This function is only guaranteed to succeed if handles are destroyed in
+ * reverse order of their creation.
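+ *
+ * For instance, assuming an application keeps its nb_flows handles in a
+ * flows[] array (hypothetical bookkeeping, not part of this API), a sketch
+ * of releasing them in reverse creation order:
+ *
+ * \code
+ * struct rte_flow_error error;
+ *
+ * while (nb_flows) {
+ *	if (rte_flow_destroy(port_id, flows[nb_flows - 1], &error))
+ *		break; // this handle (and earlier ones) may still exist
+ *	nb_flows--;
+ * }
+ * \endcode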
+ * + * @param port_id + * Port identifier of Ethernet device. + * @param flow + * Flow rule handle to destroy. + * @param[out] error + * Perform verbose error reporting if not NULL. PMDs initialize this + * structure in case of error only. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +rte_flow_destroy(uint8_t port_id, + struct rte_flow *flow, + struct rte_flow_error *error); + +/** + * Destroy all flow rules associated with a port. + * + * In the unlikely event of failure, handles are still considered destroyed + * and no longer valid but the port must be assumed to be in an inconsistent + * state. + * + * @param port_id + * Port identifier of Ethernet device. + * @param[out] error + * Perform verbose error reporting if not NULL. PMDs initialize this + * structure in case of error only. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +rte_flow_flush(uint8_t port_id, + struct rte_flow_error *error); + +/** + * Query an existing flow rule. + * + * This function allows retrieving flow-specific data such as counters. + * Data is gathered by special actions which must be present in the flow + * rule definition. + * + * \see RTE_FLOW_ACTION_TYPE_COUNT + * + * @param port_id + * Port identifier of Ethernet device. + * @param flow + * Flow rule handle to query. + * @param action + * Action type to query. + * @param[in, out] data + * Pointer to storage for the associated query data type. + * @param[out] error + * Perform verbose error reporting if not NULL. PMDs initialize this + * structure in case of error only. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +rte_flow_query(uint8_t port_id, + struct rte_flow *flow, + enum rte_flow_action_type action, + void *data, + struct rte_flow_error *error); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_FLOW_H_ */ diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h new file mode 100644 index 00000000..da5749d5 --- /dev/null +++ b/lib/librte_ether/rte_flow_driver.h @@ -0,0 +1,182 @@ +/*- + * BSD LICENSE + * + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_FLOW_DRIVER_H_
+#define RTE_FLOW_DRIVER_H_
+
+/**
+ * @file
+ * RTE generic flow API (driver side)
+ *
+ * This file provides implementation helpers for internal use by PMDs; they
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_flow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic flow operations structure implemented and returned by PMDs.
+ *
+ * To implement this API, PMDs must handle the RTE_ETH_FILTER_GENERIC filter
+ * type in their .filter_ctrl callback function (struct eth_dev_ops) as well
+ * as the RTE_ETH_FILTER_GET filter operation.
+ *
+ * If successful, this operation must result in a pointer to a PMD-specific
+ * struct rte_flow_ops written to the argument address as described below:
+ *
+ * \code
+ *
+ * // PMD filter_ctrl callback
+ *
+ * static const struct rte_flow_ops pmd_flow_ops = { ... };
+ *
+ * switch (filter_type) {
+ * case RTE_ETH_FILTER_GENERIC:
+ *	if (filter_op != RTE_ETH_FILTER_GET)
+ *		return -EINVAL;
+ *	*(const void **)arg = &pmd_flow_ops;
+ *	return 0;
+ * }
+ *
+ * \endcode
+ *
+ * See also rte_flow_ops_get().
+ *
+ * These callback functions are not meant to be used by applications
+ * directly; applications must instead rely on the API defined in
+ * rte_flow.h.
+ *
+ * Public-facing wrapper functions perform a few consistency checks so that
+ * unimplemented (i.e. NULL) callbacks simply return -ENOTSUP. These
+ * callbacks otherwise differ from their public counterparts only by their
+ * first argument (with the port ID already resolved to a pointer to struct
+ * rte_eth_dev).
+ */
+struct rte_flow_ops {
+	/** See rte_flow_validate(). */
+	int (*validate)
+		(struct rte_eth_dev *,
+		 const struct rte_flow_attr *,
+		 const struct rte_flow_item [],
+		 const struct rte_flow_action [],
+		 struct rte_flow_error *);
+	/** See rte_flow_create(). */
+	struct rte_flow *(*create)
+		(struct rte_eth_dev *,
+		 const struct rte_flow_attr *,
+		 const struct rte_flow_item [],
+		 const struct rte_flow_action [],
+		 struct rte_flow_error *);
+	/** See rte_flow_destroy(). */
+	int (*destroy)
+		(struct rte_eth_dev *,
+		 struct rte_flow *,
+		 struct rte_flow_error *);
+	/** See rte_flow_flush(). */
+	int (*flush)
+		(struct rte_eth_dev *,
+		 struct rte_flow_error *);
+	/** See rte_flow_query(). */
+	int (*query)
+		(struct rte_eth_dev *,
+		 struct rte_flow *,
+		 enum rte_flow_action_type,
+		 void *,
+		 struct rte_flow_error *);
+};
+
+/**
+ * Initialize generic flow error structure.
+ *
+ * This function also sets rte_errno to a given value.
+ *
+ * @param[out] error
+ *   Pointer to flow error structure (may be NULL).
+ * @param code
+ *   Related error code (rte_errno).
+ * @param type
+ *   Cause field and error types.
+ * @param cause
+ *   Object responsible for the error.
+ * @param message
+ *   Human-readable error message.
+ *
+ * @return
+ *   Error code.
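+ *
+ * As a sketch, a hypothetical pmd_flow_validate() callback rejecting an
+ * unsupported pattern item might use it as follows (by convention, these
+ * callbacks report negative errno values):
+ *
+ * \code
+ * if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ *	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ *			   item, "only Ethernet items are supported");
+ *	return -rte_errno;
+ * }
+ * \endcode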
+ */
+static inline int
+rte_flow_error_set(struct rte_flow_error *error,
+		   int code,
+		   enum rte_flow_error_type type,
+		   const void *cause,
+		   const char *message)
+{
+	if (error) {
+		*error = (struct rte_flow_error){
+			.type = type,
+			.cause = cause,
+			.message = message,
+		};
+	}
+	rte_errno = code;
+	return code;
+}
+
+/**
+ * Get generic flow operations structure from a port.
+ *
+ * @param port_id
+ *   Port identifier to query.
+ * @param[out] error
+ *   Pointer to flow error structure.
+ *
+ * @return
+ *   The flow operations structure associated with port_id; NULL in case of
+ *   error, in which case rte_errno is set and the error structure contains
+ *   additional details.
+ */
+const struct rte_flow_ops *
+rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_FLOW_DRIVER_H_ */