aboutsummaryrefslogtreecommitdiffstats
path: root/lib/librte_ether
diff options
context:
space:
mode:
authorLuca Boccassi <luca.boccassi@gmail.com>2017-08-16 18:42:05 +0100
committerLuca Boccassi <luca.boccassi@gmail.com>2017-08-16 18:46:04 +0100
commitf239aed5e674965691846e8ce3f187dd47523689 (patch)
treea153a3125c6e183c73871a8ecaa4b285fed5fbd5 /lib/librte_ether
parentbf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7 Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'lib/librte_ether')
-rw-r--r--lib/librte_ether/Makefile5
-rw-r--r--lib/librte_ether/rte_ethdev.c261
-rw-r--r--lib/librte_ether/rte_ethdev.h347
-rw-r--r--lib/librte_ether/rte_ethdev_pci.h11
-rw-r--r--lib/librte_ether/rte_ethdev_vdev.h1
-rw-r--r--lib/librte_ether/rte_ether_version.map51
-rw-r--r--lib/librte_ether/rte_flow.c249
-rw-r--r--lib/librte_ether/rte_flow.h176
-rw-r--r--lib/librte_ether/rte_flow_driver.h5
-rw-r--r--lib/librte_ether/rte_tm.c438
-rw-r--r--lib/librte_ether/rte_tm.h1912
-rw-r--r--lib/librte_ether/rte_tm_driver.h366
12 files changed, 3355 insertions, 467 deletions
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index 93fdde10..db692ae4 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@ LIBABIVER := 6
SRCS-y += rte_ethdev.c
SRCS-y += rte_flow.c
+SRCS-y += rte_tm.c
#
# Export include files
@@ -56,5 +57,7 @@ SYMLINK-y-include += rte_eth_ctrl.h
SYMLINK-y-include += rte_dev_info.h
SYMLINK-y-include += rte_flow.h
SYMLINK-y-include += rte_flow_driver.h
+SYMLINK-y-include += rte_tm.h
+SYMLINK-y-include += rte_tm_driver.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 83898a8f..0597641e 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -72,7 +72,6 @@ static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
-static uint8_t nb_ports;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -129,6 +128,7 @@ struct rte_eth_dev_callback {
TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
rte_eth_dev_cb_fn cb_fn; /**< Callback address */
void *cb_arg; /**< Parameter for callback */
+ void *ret_param; /**< Return parameter */
enum rte_eth_event_type event; /**< Interrupt event type */
uint32_t active; /**< Callback is executing */
};
@@ -178,9 +178,11 @@ rte_eth_dev_allocated(const char *name)
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
- strcmp(rte_eth_devices[i].data->name, name) == 0)
- return &rte_eth_devices[i];
+ if (rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[i].device) {
+ if (!strcmp(rte_eth_devices[i].device->name, name))
+ return &rte_eth_devices[i];
+ }
}
return NULL;
}
@@ -207,7 +209,6 @@ eth_dev_get(uint8_t port_id)
TAILQ_INIT(&(eth_dev->link_intr_cbs));
eth_dev_last_created_port = port_id;
- nb_ports++;
return eth_dev;
}
@@ -280,7 +281,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
return -EINVAL;
eth_dev->state = RTE_ETH_DEV_UNUSED;
- nb_ports--;
return 0;
}
@@ -288,7 +288,8 @@ int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
if (port_id >= RTE_MAX_ETHPORTS ||
- rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
+ (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
return 0;
else
return 1;
@@ -304,13 +305,21 @@ rte_eth_dev_socket_id(uint8_t port_id)
uint8_t
rte_eth_dev_count(void)
{
- return nb_ports;
+ uint8_t p;
+ uint8_t count;
+
+ count = 0;
+
+ RTE_ETH_FOREACH_DEV(p)
+ count++;
+
+ return count;
}
int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
- char *tmp;
+ const char *tmp;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -321,7 +330,7 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
/* shouldn't check 'rte_eth_devices[i].data',
* because it might be overwritten by VDEV PMD */
- tmp = rte_eth_dev_data[port_id].name;
+ tmp = rte_eth_devices[port_id].device->name;
strcpy(name, tmp);
return 0;
}
@@ -329,6 +338,7 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
+ int ret;
int i;
if (name == NULL) {
@@ -336,16 +346,14 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
return -EINVAL;
}
- if (!nb_ports)
- return -ENODEV;
-
- *port_id = RTE_MAX_ETHPORTS;
RTE_ETH_FOREACH_DEV(i) {
- if (!strncmp(name,
- rte_eth_dev_data[i].name, strlen(name))) {
+ if (!rte_eth_devices[i].device)
+ continue;
+ ret = strncmp(name, rte_eth_devices[i].device->name,
+ strlen(name));
+ if (ret == 0) {
*port_id = i;
-
return 0;
}
}
@@ -359,16 +367,6 @@ rte_eth_dev_is_detachable(uint8_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
- switch (rte_eth_devices[port_id].data->kdrv) {
- case RTE_KDRV_IGB_UIO:
- case RTE_KDRV_UIO_GENERIC:
- case RTE_KDRV_NIC_UIO:
- case RTE_KDRV_NONE:
- case RTE_KDRV_VFIO:
- break;
- default:
- return -ENOTSUP;
- }
dev_flags = rte_eth_devices[port_id].data->dev_flags;
if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
@@ -438,12 +436,14 @@ rte_eth_dev_detach(uint8_t port_id, char *name)
if (rte_eth_dev_is_detachable(port_id))
goto err;
- snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
- "%s", rte_eth_devices[port_id].data->name);
- ret = rte_eal_dev_detach(name);
+ snprintf(name, RTE_DEV_NAME_MAX_LEN, "%s",
+ rte_eth_devices[port_id].device->name);
+
+ ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
if (ret < 0)
goto err;
+ rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
return 0;
err:
@@ -753,13 +753,13 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
if ((dev_conf->intr_conf.lsc == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
- dev->data->drv_name);
+ dev->device->driver->name);
return -EINVAL;
}
if ((dev_conf->intr_conf.rmv == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
- dev->data->drv_name);
+ dev->device->driver->name);
return -EINVAL;
}
@@ -1900,7 +1900,7 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
- dev_info->driver_name = dev->data->drv_name;
+ dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}
@@ -1975,6 +1975,7 @@ int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
struct rte_eth_dev *dev;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -1990,7 +1991,23 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
- return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+ ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+ if (ret == 0) {
+ struct rte_vlan_filter_conf *vfc;
+ int vidx;
+ int vbit;
+
+ vfc = &dev->data->vlan_filter_conf;
+ vidx = vlan_id / 64;
+ vbit = vlan_id % 64;
+
+ if (on)
+ vfc->ids[vidx] |= UINT64_C(1) << vbit;
+ else
+ vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
+ }
+
+ return ret;
}
int
@@ -2351,6 +2368,7 @@ get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
unsigned i;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_info_get(port_id, &dev_info);
for (i = 0; i < dev_info.max_mac_addrs; i++)
@@ -2718,12 +2736,13 @@ rte_eth_dev_callback_unregister(uint8_t port_id,
return ret;
}
-void
+int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *cb_arg)
+ enum rte_eth_event_type event, void *cb_arg, void *ret_param)
{
struct rte_eth_dev_callback *cb_lst;
struct rte_eth_dev_callback dev_cb;
+ int rc = 0;
rte_spinlock_lock(&rte_eth_dev_cb_lock);
TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
@@ -2733,14 +2752,17 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
cb_lst->active = 1;
if (cb_arg != NULL)
dev_cb.cb_arg = cb_arg;
+ if (ret_param != NULL)
+ dev_cb.ret_param = ret_param;
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
- dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
- dev_cb.cb_arg);
+ rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
+ dev_cb.cb_arg, dev_cb.ret_param);
rte_spinlock_lock(&rte_eth_dev_cb_lock);
cb_lst->active = 0;
}
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return rc;
}
int
@@ -2789,7 +2811,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->data->drv_name, ring_name,
+ dev->device->driver->name, ring_name,
dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
@@ -2872,128 +2894,6 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id,
return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}
-#ifdef RTE_NIC_BYPASS
-int rte_eth_dev_bypass_init(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
- (*dev->dev_ops->bypass_init)(dev);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
- (*dev->dev_ops->bypass_state_show)(dev, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
- (*dev->dev_ops->bypass_state_set)(dev, new_state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
- (*dev->dev_ops->bypass_event_show)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
- (*dev->dev_ops->bypass_event_set)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
- (*dev->dev_ops->bypass_ver_show)(dev, ver);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_reset(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_reset)(dev);
- return 0;
-}
-#endif
int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
@@ -3472,3 +3372,40 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
-ENOTSUP);
return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
+
+static void
+rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
+ const struct rte_eth_desc_lim *desc_lim)
+{
+ if (desc_lim->nb_align != 0)
+ *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
+
+ if (desc_lim->nb_max != 0)
+ *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
+
+ *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
+}
+
+int
+rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+ uint16_t *nb_rx_desc,
+ uint16_t *nb_tx_desc)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ if (nb_rx_desc != NULL)
+ rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
+
+ if (nb_tx_desc != NULL)
+ rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
+
+ return 0;
+}
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 0f38b45f..0adf3274 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -118,7 +118,7 @@
* - NIC queue statistics mappings
*
* Any other configuration will not be stored and will need to be re-entered
- * after a call to rte_eth_dev_start().
+ * before a call to rte_eth_dev_start().
*
* Finally, a network application can close an Ethernet device by invoking the
* rte_eth_dev_close() function.
@@ -172,8 +172,6 @@ extern "C" {
#include <stdint.h>
-#include <rte_dev.h>
-
/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT
@@ -374,6 +372,14 @@ enum rte_vlan_type {
};
/**
+ * A structure used to describe a vlan filter.
+ * If the bit corresponding to a VID is set, such VID is on.
+ */
+struct rte_vlan_filter_conf {
+ uint64_t ids[64];
+};
+
+/**
* A structure used to configure the Receive Side Scaling (RSS) feature
* of an Ethernet port.
* If not NULL, the *rss_key* pointer of the *rss_conf* structure points
@@ -629,6 +635,24 @@ struct rte_eth_vmdq_dcb_conf {
/**< Selects a queue in a pool */
};
+/**
+ * A structure used to configure the VMDQ feature of an Ethernet port when
+ * not combined with the DCB feature.
+ *
+ * Using this feature, packets are routed to a pool of queues. By default,
+ * the pool selection is based on the MAC address, the vlan id in the
+ * vlan tag as specified in the pool_map array.
+ * Passing the ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
+ * selection using only the MAC address. MAC address to pool mapping is done
+ * using the rte_eth_dev_mac_addr_add function, with the pool parameter
+ * corresponding to the pool id.
+ *
+ * Queue selection within the selected pool will be done using RSS when
+ * it is enabled or revert to the first queue of the pool if not.
+ *
+ * A default pool may be used, if desired, to route all traffic which
+ * does not match the vlan filter rules or any pool MAC address.
+ */
struct rte_eth_vmdq_rx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
uint8_t enable_default_pool; /**< If non-zero, use a default pool */
@@ -901,6 +925,10 @@ struct rte_eth_conf {
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
+#define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
+/**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
+ * tx queue without SW lock.
+ */
struct rte_pci_device;
@@ -1048,6 +1076,8 @@ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
} \
} while (0)
+#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+
/**
* l2 tunnel configuration.
*/
@@ -1381,59 +1411,6 @@ typedef int (*eth_l2_tunnel_offload_set_t)
uint8_t en);
/**< @internal enable/disable the l2 tunnel offload functions */
-#ifdef RTE_NIC_BYPASS
-
-enum {
- RTE_BYPASS_MODE_NONE,
- RTE_BYPASS_MODE_NORMAL,
- RTE_BYPASS_MODE_BYPASS,
- RTE_BYPASS_MODE_ISOLATE,
- RTE_BYPASS_MODE_NUM,
-};
-
-#define RTE_BYPASS_MODE_VALID(x) \
- ((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
-
-enum {
- RTE_BYPASS_EVENT_NONE,
- RTE_BYPASS_EVENT_START,
- RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
- RTE_BYPASS_EVENT_POWER_ON,
- RTE_BYPASS_EVENT_OS_OFF,
- RTE_BYPASS_EVENT_POWER_OFF,
- RTE_BYPASS_EVENT_TIMEOUT,
- RTE_BYPASS_EVENT_NUM
-};
-
-#define RTE_BYPASS_EVENT_VALID(x) \
- ((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_MODE_NUM)
-
-enum {
- RTE_BYPASS_TMT_OFF, /* timeout disabled. */
- RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
- RTE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
- RTE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
- RTE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
- RTE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
- RTE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
- RTE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
- RTE_BYPASS_TMT_NUM
-};
-
-#define RTE_BYPASS_TMT_VALID(x) \
- ((x) == RTE_BYPASS_TMT_OFF || \
- ((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))
-
-typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
-typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
-typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
-typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
-typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
-typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
-typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
-typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
-typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
-#endif
typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
@@ -1441,6 +1418,9 @@ typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
void *arg);
/**< @internal Take operations to assigned filter type on an Ethernet device */
+typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
+/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
+
typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
/**< @internal Get dcb information on an Ethernet device */
@@ -1460,7 +1440,7 @@ struct eth_dev_ops {
eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */
eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. */
- eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OF. */
+ eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OFF. */
eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */
eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */
eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address. */
@@ -1540,18 +1520,6 @@ struct eth_dev_ops {
eth_get_eeprom_t get_eeprom; /**< Get eeprom data. */
eth_set_eeprom_t set_eeprom; /**< Set eeprom. */
- /* bypass control */
-#ifdef RTE_NIC_BYPASS
- bypass_init_t bypass_init;
- bypass_state_set_t bypass_state_set;
- bypass_state_show_t bypass_state_show;
- bypass_event_set_t bypass_event_set;
- bypass_event_show_t bypass_event_show;
- bypass_wd_timeout_set_t bypass_wd_timeout_set;
- bypass_wd_timeout_show_t bypass_wd_timeout_show;
- bypass_ver_show_t bypass_ver_show;
- bypass_wd_reset_t bypass_wd_reset;
-#endif
eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
@@ -1573,6 +1541,9 @@ struct eth_dev_ops {
/**< Get extended device statistic values by ID. */
eth_xstats_get_names_by_id_t xstats_get_names_by_id;
/**< Get name of extended device statistics by ID. */
+
+ eth_tm_ops_get_t tm_ops_get;
+ /**< Get Traffic Management (TM) operations. */
};
/**
@@ -1644,6 +1615,7 @@ struct rte_eth_rxtx_callback {
enum rte_eth_dev_state {
RTE_ETH_DEV_UNUSED = 0,
RTE_ETH_DEV_ATTACHED,
+ RTE_ETH_DEV_DEFERRED,
};
/**
@@ -1687,7 +1659,7 @@ struct rte_eth_dev_sriov {
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
-#define RTE_ETH_NAME_MAX_LEN (32)
+#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
/**
* @internal
@@ -1737,7 +1709,8 @@ struct rte_eth_dev_data {
uint32_t dev_flags; /**< Capabilities */
enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
int numa_node; /**< NUMA node connection */
- const char *drv_name; /**< Driver name */
+ struct rte_vlan_filter_conf vlan_filter_conf;
+ /**< VLAN filter configuration. */
};
/** Device supports hotplug detach */
@@ -1777,13 +1750,12 @@ uint8_t rte_eth_find_next(uint8_t port_id);
/**
* Get the total number of Ethernet devices that have been successfully
- * initialized by the [matching] Ethernet driver during the PCI probing phase.
- * All devices whose port identifier is in the range
- * [0, rte_eth_dev_count() - 1] can be operated on by network applications
- * immediately after invoking rte_eal_init().
- * If the application unplugs a port using hotplug function, The enabled port
- * numbers may be noncontiguous. In the case, the applications need to manage
- * enabled port by using the ``RTE_ETH_FOREACH_DEV()`` macro.
+ * initialized by the matching Ethernet driver during the PCI probing phase
+ * and that are available for applications to use. These devices must be
+ * accessed by using the ``RTE_ETH_FOREACH_DEV()`` macro to deal with
+ * non-contiguous ranges of devices.
+ * These non-contiguous ranges can be created by calls to hotplug functions or
+ * by some PMDs.
*
* @return
* - The total number of usable Ethernet devices.
@@ -1859,7 +1831,8 @@ int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
* @param port_id
* The port identifier of the device to detach.
* @param devname
- * A pointer to a device name actually detached.
+ * A pointer to a buffer that will be filled with the device name.
+ * This buffer must be at least RTE_DEV_NAME_MAX_LEN long.
* @return
* 0 on success and devname is filled, negative on error
*/
@@ -2358,7 +2331,7 @@ rte_eth_xstats_get_names_by_id(uint8_t port_id,
* @param port_id
* The port identifier of the Ethernet device.
* @param ids
- * A pointer to an ids array passed by application. This tells wich
+ * A pointer to an ids array passed by application. This tells which
* statistics values function should retrieve. This parameter
* can be set to NULL if n is 0. In this case function will retrieve
* all avalible statistics.
@@ -2997,6 +2970,10 @@ static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
* rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf* buffers
* of those packets whose transmission was effectively completed.
*
+ * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * invoke this function concurrently on the same tx queue without SW lock.
+ * @see rte_eth_dev_info_get, struct rte_eth_txconf::txq_flags
+ *
* @param port_id
* The port identifier of the Ethernet device.
* @param queue_id
@@ -3266,7 +3243,7 @@ rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
* causing N packets to be sent, and the error callback to be called for
* the rest.
*/
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
@@ -3401,8 +3378,8 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
-typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
- enum rte_eth_event_type event, void *cb_arg);
+typedef int (*rte_eth_dev_cb_fn)(uint8_t port_id,
+ enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/**< user application callback to be registered for interrupts */
@@ -3419,11 +3396,6 @@ typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
* @param cb_arg
* Pointer to the parameters for the registered callback.
*
- * The user data is overwritten in the case of RTE_ETH_EVENT_VF_MBOX.
- * This even occurs when a message from the VF is received by the PF.
- * The user data is overwritten with struct rte_pmd_ixgbe_mb_event_param.
- * This struct is defined in rte_pmd_ixgbe.h.
- *
* @return
* - On success, zero.
* - On failure, a negative value.
@@ -3463,15 +3435,17 @@ int rte_eth_dev_callback_unregister(uint8_t port_id,
* @param event
* Eth device interrupt event type.
* @param cb_arg
- * Update callback parameter to pass data back to user application.
+ * callback parameter.
+ * @param ret_param
+ * To pass data back to user application.
* This allows the user application to decide if a particular function
* is permitted or not.
*
* @return
- * void
+ * int
*/
-void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *cb_arg);
+int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/**
* When there is no rx packet coming in Rx Queue for a long time, we can
@@ -3827,171 +3801,6 @@ int rte_eth_mirror_rule_reset(uint8_t port_id,
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
uint16_t tx_rate);
-/**
- * Initialize bypass logic. This function needs to be called before
- * executing any other bypass API.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_init(uint8_t port);
-
-/**
- * Return bypass state.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param state
- * The return bypass state.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);
-
-/**
- * Set bypass state
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param new_state
- * The current bypass state.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);
-
-/**
- * Return bypass state when given event occurs.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param event
- * The bypass event
- * - (1) Main power on (power button is pushed)
- * - (2) Auxiliary power on (power supply is being plugged)
- * - (3) Main power off (system shutdown and power supply is left plugged in)
- * - (4) Auxiliary power off (power supply is being unplugged)
- * - (5) Display or set the watchdog timer
- * @param state
- * The bypass state when given event occurred.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);
-
-/**
- * Set bypass state when given event occurs.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param event
- * The bypass event
- * - (1) Main power on (power button is pushed)
- * - (2) Auxiliary power on (power supply is being plugged)
- * - (3) Main power off (system shutdown and power supply is left plugged in)
- * - (4) Auxiliary power off (power supply is being unplugged)
- * - (5) Display or set the watchdog timer
- * @param state
- * The assigned state when given event occurs.
- * - (1) Normal mode
- * - (2) Bypass mode
- * - (3) Isolate mode
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);
-
-/**
- * Set bypass watchdog timeout count.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param timeout
- * The timeout to be set.
- * - (0) 0 seconds (timer is off)
- * - (1) 1.5 seconds
- * - (2) 2 seconds
- * - (3) 3 seconds
- * - (4) 4 seconds
- * - (5) 8 seconds
- * - (6) 16 seconds
- * - (7) 32 seconds
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);
-
-/**
- * Get bypass firmware version.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param ver
- * The firmware version
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);
-
-/**
- * Return bypass watchdog timeout in seconds
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param wd_timeout
- * The return watchdog timeout. "0" represents timer expired
- * - (0) 0 seconds (timer is off)
- * - (1) 1.5 seconds
- * - (2) 2 seconds
- * - (3) 3 seconds
- * - (4) 4 seconds
- * - (5) 8 seconds
- * - (6) 16 seconds
- * - (7) 32 seconds
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
-
-/**
- * Reset bypass watchdog timer
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_bypass_wd_reset(uint8_t port);
-
/**
* Configuration of Receive Side Scaling hash computation of Ethernet device.
*
@@ -4587,7 +4396,7 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
* @param port_id
* pointer to port identifier of the device
* @return
-* - (0) if successful.
+* - (0) if successful and port_id is filled.
* - (-ENODEV or -EINVAL) on failure.
*/
int
@@ -4607,6 +4416,26 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
+/**
+ * Check that numbers of Rx and Tx descriptors satisfy descriptors limits from
+ * the ethernet device information, otherwise adjust them to boundaries.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param nb_rx_desc
+ * A pointer to a uint16_t where the number of receive
+ * descriptors stored.
+ * @param nb_tx_desc
+ * A pointer to a uint16_t where the number of transmit
+ * descriptors stored.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP, -ENODEV or -EINVAL) on failure.
+ */
+int rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+ uint16_t *nb_rx_desc,
+ uint16_t *nb_tx_desc);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h
index d3bc03cf..56b10721 100644
--- a/lib/librte_ether/rte_ethdev_pci.h
+++ b/lib/librte_ether/rte_ethdev_pci.h
@@ -45,9 +45,6 @@
* The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
* @param pci_dev
* The *pci_dev* pointer is the address of the *rte_pci_device* structure.
- *
- * @return
- * - 0 on success, negative on error
*/
static inline void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
@@ -69,7 +66,6 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
eth_dev->data->kdrv = pci_dev->kdrv;
eth_dev->data->numa_node = pci_dev->device.numa_node;
- eth_dev->data->drv_name = pci_dev->driver->driver.name;
}
/**
@@ -118,7 +114,6 @@ rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size)
}
eth_dev->device = &dev->device;
- eth_dev->intr_handle = &dev->intr_handle;
rte_eth_copy_pci_info(eth_dev, dev);
return eth_dev;
}
@@ -134,6 +129,12 @@ rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_private = NULL;
+ /*
+ * Secondary process will check the name to attach.
+ * Clear this field to avoid attaching a released ports.
+ */
+ eth_dev->data->name[0] = '\0';
+
eth_dev->device = NULL;
eth_dev->intr_handle = NULL;
}
diff --git a/lib/librte_ether/rte_ethdev_vdev.h b/lib/librte_ether/rte_ethdev_vdev.h
index fa2cb61e..4d2c3e2b 100644
--- a/lib/librte_ether/rte_ethdev_vdev.h
+++ b/lib/librte_ether/rte_ethdev_vdev.h
@@ -77,7 +77,6 @@ rte_eth_vdev_allocate(struct rte_vdev_device *dev, size_t private_data_size)
eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->data->numa_node = dev->device.numa_node;
- eth_dev->data->drv_name = dev->device.driver->name;
return eth_dev;
}
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map
index d6726bb1..42837285 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ether_version.map
@@ -1,7 +1,6 @@
DPDK_2.2 {
global:
- _rte_eth_dev_callback_process;
rte_eth_add_rx_callback;
rte_eth_add_tx_callback;
rte_eth_allmulticast_disable;
@@ -10,14 +9,6 @@ DPDK_2.2 {
rte_eth_dev_allocate;
rte_eth_dev_allocated;
rte_eth_dev_attach;
- rte_eth_dev_bypass_event_show;
- rte_eth_dev_bypass_event_store;
- rte_eth_dev_bypass_init;
- rte_eth_dev_bypass_state_set;
- rte_eth_dev_bypass_state_show;
- rte_eth_dev_bypass_ver_show;
- rte_eth_dev_bypass_wd_reset;
- rte_eth_dev_bypass_wd_timeout_show;
rte_eth_dev_callback_register;
rte_eth_dev_callback_unregister;
rte_eth_dev_close;
@@ -70,7 +61,6 @@ DPDK_2.2 {
rte_eth_dev_uc_all_hash_table_set;
rte_eth_dev_uc_hash_table_set;
rte_eth_dev_vlan_filter;
- rte_eth_dev_wd_timeout_store;
rte_eth_dma_zone_reserve;
rte_eth_led_off;
rte_eth_led_on;
@@ -151,8 +141,49 @@ DPDK_17.05 {
rte_eth_dev_attach_secondary;
rte_eth_find_next;
+ rte_eth_tx_done_cleanup;
rte_eth_xstats_get_by_id;
rte_eth_xstats_get_id_by_name;
rte_eth_xstats_get_names_by_id;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ _rte_eth_dev_callback_process;
+ rte_eth_dev_adjust_nb_rx_tx_desc;
+ rte_flow_copy;
+ rte_flow_isolate;
+ rte_tm_capabilities_get;
+ rte_tm_get_leaf_nodes;
+ rte_tm_hierarchy_commit;
+ rte_tm_level_capabilities_get;
+ rte_tm_mark_ip_dscp;
+ rte_tm_mark_ip_ecn;
+ rte_tm_mark_vlan_dei;
+ rte_tm_node_add;
+ rte_tm_node_capabilities_get;
+ rte_tm_node_cman_update;
+ rte_tm_node_delete;
+ rte_tm_node_parent_update;
+ rte_tm_node_resume;
+ rte_tm_node_shaper_update;
+ rte_tm_node_shared_shaper_update;
+ rte_tm_node_shared_wred_context_update;
+ rte_tm_node_stats_read;
+ rte_tm_node_stats_update;
+ rte_tm_node_suspend;
+ rte_tm_node_type_get;
+ rte_tm_node_wfq_weight_mode_update;
+ rte_tm_node_wred_context_update;
+ rte_tm_shaper_profile_add;
+ rte_tm_shaper_profile_delete;
+ rte_tm_shared_shaper_add_update;
+ rte_tm_shared_shaper_delete;
+ rte_tm_shared_wred_context_add_update;
+ rte_tm_shared_wred_context_delete;
+ rte_tm_wred_profile_add;
+ rte_tm_wred_profile_delete;
+
+} DPDK_17.05;
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c
index aaa70d68..2001fbbf 100644
--- a/lib/librte_ether/rte_flow.c
+++ b/lib/librte_ether/rte_flow.c
@@ -31,14 +31,81 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <errno.h>
+#include <stddef.h>
#include <stdint.h>
+#include <string.h>
+#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"
+/**
+ * Flow elements description tables.
+ */
+struct rte_flow_desc_data {
+ const char *name;
+ size_t size;
+};
+
+/** Generate flow_item[] entry. */
+#define MK_FLOW_ITEM(t, s) \
+ [RTE_FLOW_ITEM_TYPE_ ## t] = { \
+ .name = # t, \
+ .size = s, \
+ }
+
+/** Information about known flow pattern items. */
+static const struct rte_flow_desc_data rte_flow_desc_item[] = {
+ MK_FLOW_ITEM(END, 0),
+ MK_FLOW_ITEM(VOID, 0),
+ MK_FLOW_ITEM(INVERT, 0),
+ MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
+ MK_FLOW_ITEM(PF, 0),
+ MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
+ MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
+ MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
+ MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
+ MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
+ MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
+ MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
+ MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
+ MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
+ MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
+ MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
+ MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
+ MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
+ MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
+ MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
+ MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
+};
+
+/** Generate flow_action[] entry. */
+#define MK_FLOW_ACTION(t, s) \
+ [RTE_FLOW_ACTION_TYPE_ ## t] = { \
+ .name = # t, \
+ .size = s, \
+ }
+
+/** Information about known flow actions. */
+static const struct rte_flow_desc_data rte_flow_desc_action[] = {
+ MK_FLOW_ACTION(END, 0),
+ MK_FLOW_ACTION(VOID, 0),
+ MK_FLOW_ACTION(PASSTHRU, 0),
+ MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
+ MK_FLOW_ACTION(FLAG, 0),
+ MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
+ MK_FLOW_ACTION(DROP, 0),
+ MK_FLOW_ACTION(COUNT, 0),
+ MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
+ MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
+ MK_FLOW_ACTION(PF, 0),
+ MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
+};
+
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
@@ -157,3 +224,185 @@ rte_flow_query(uint8_t port_id,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
}
+
+/* Restrict ingress traffic to the defined flow rules. */
+int
+rte_flow_isolate(uint8_t port_id,
+ int set,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (!ops)
+ return -rte_errno;
+ if (likely(!!ops->isolate))
+ return ops->isolate(dev, set, error);
+ return -rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+}
+
+/** Compute storage space needed by item specification. */
+static void
+flow_item_spec_size(const struct rte_flow_item *item,
+ size_t *size, size_t *pad)
+{
+ if (!item->spec) {
+ *size = 0;
+ goto empty;
+ }
+ switch (item->type) {
+ union {
+ const struct rte_flow_item_raw *raw;
+ } spec;
+
+ /* Not a fall-through */
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ spec.raw = item->spec;
+ *size = offsetof(struct rte_flow_item_raw, pattern) +
+ spec.raw->length * sizeof(*spec.raw->pattern);
+ break;
+ default:
+ *size = rte_flow_desc_item[item->type].size;
+ break;
+ }
+empty:
+ *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+}
+
+/** Compute storage space needed by action configuration. */
+static void
+flow_action_conf_size(const struct rte_flow_action *action,
+ size_t *size, size_t *pad)
+{
+ if (!action->conf) {
+ *size = 0;
+ goto empty;
+ }
+ switch (action->type) {
+ union {
+ const struct rte_flow_action_rss *rss;
+ } conf;
+
+ /* Not a fall-through. */
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ conf.rss = action->conf;
+ *size = offsetof(struct rte_flow_action_rss, queue) +
+ conf.rss->num * sizeof(*conf.rss->queue);
+ break;
+ default:
+ *size = rte_flow_desc_action[action->type].size;
+ break;
+ }
+empty:
+ *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+}
+
+/** Store a full rte_flow description. */
+size_t
+rte_flow_copy(struct rte_flow_desc *desc, size_t len,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow_desc *fd = NULL;
+ size_t tmp;
+ size_t pad;
+ size_t off1 = 0;
+ size_t off2 = 0;
+ size_t size = 0;
+
+store:
+ if (items) {
+ const struct rte_flow_item *item;
+
+ item = items;
+ if (fd)
+ fd->items = (void *)&fd->data[off1];
+ do {
+ struct rte_flow_item *dst = NULL;
+
+ if ((size_t)item->type >=
+ RTE_DIM(rte_flow_desc_item) ||
+ !rte_flow_desc_item[item->type].name) {
+ rte_errno = ENOTSUP;
+ return 0;
+ }
+ if (fd)
+ dst = memcpy(fd->data + off1, item,
+ sizeof(*item));
+ off1 += sizeof(*item);
+ flow_item_spec_size(item, &tmp, &pad);
+ if (item->spec) {
+ if (fd)
+ dst->spec = memcpy(fd->data + off2,
+ item->spec, tmp);
+ off2 += tmp + pad;
+ }
+ if (item->last) {
+ if (fd)
+ dst->last = memcpy(fd->data + off2,
+ item->last, tmp);
+ off2 += tmp + pad;
+ }
+ if (item->mask) {
+ if (fd)
+ dst->mask = memcpy(fd->data + off2,
+ item->mask, tmp);
+ off2 += tmp + pad;
+ }
+ off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
+ } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
+ off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
+ }
+ if (actions) {
+ const struct rte_flow_action *action;
+
+ action = actions;
+ if (fd)
+ fd->actions = (void *)&fd->data[off1];
+ do {
+ struct rte_flow_action *dst = NULL;
+
+ if ((size_t)action->type >=
+ RTE_DIM(rte_flow_desc_action) ||
+ !rte_flow_desc_action[action->type].name) {
+ rte_errno = ENOTSUP;
+ return 0;
+ }
+ if (fd)
+ dst = memcpy(fd->data + off1, action,
+ sizeof(*action));
+ off1 += sizeof(*action);
+ flow_action_conf_size(action, &tmp, &pad);
+ if (action->conf) {
+ if (fd)
+ dst->conf = memcpy(fd->data + off2,
+ action->conf, tmp);
+ off2 += tmp + pad;
+ }
+ off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
+ } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
+ }
+ if (fd != NULL)
+ return size;
+ off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
+ tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
+ sizeof(double));
+ size = tmp + off1 + off2;
+ if (size > len)
+ return size;
+ fd = desc;
+ if (fd != NULL) {
+ *fd = (const struct rte_flow_desc) {
+ .size = size,
+ .attr = *attr,
+ };
+ tmp -= offsetof(struct rte_flow_desc, data);
+ off2 = tmp + off1;
+ off1 = tmp;
+ goto store;
+ }
+ return 0;
+}
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index c47edbc9..bba6169f 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -297,6 +297,18 @@ enum rte_flow_item_type {
* See struct rte_flow_item_gre.
*/
RTE_FLOW_ITEM_TYPE_GRE,
+
+ /**
+ * [META]
+ *
+ * Fuzzy pattern match, expect faster than default.
+ *
+ * This is for device that support fuzzy matching option.
+ * Usually a fuzzy matching is fast but the cost is accuracy.
+ *
+ * See struct rte_flow_item_fuzzy.
+ */
+ RTE_FLOW_ITEM_TYPE_FUZZY,
};
/**
@@ -429,7 +441,7 @@ static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
struct rte_flow_item_eth {
struct ether_addr dst; /**< Destination MAC. */
struct ether_addr src; /**< Source MAC. */
- uint16_t type; /**< EtherType. */
+ rte_be16_t type; /**< EtherType. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
@@ -437,7 +449,7 @@ struct rte_flow_item_eth {
static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = 0x0000,
+ .type = RTE_BE16(0x0000),
};
#endif
@@ -450,15 +462,15 @@ static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
* RTE_FLOW_ITEM_TYPE_VLAN.
*/
struct rte_flow_item_vlan {
- uint16_t tpid; /**< Tag protocol identifier. */
- uint16_t tci; /**< Tag control information. */
+ rte_be16_t tpid; /**< Tag protocol identifier. */
+ rte_be16_t tci; /**< Tag control information. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
#ifndef __cplusplus
static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
- .tpid = 0x0000,
- .tci = 0xffff,
+ .tpid = RTE_BE16(0x0000),
+ .tci = RTE_BE16(0xffff),
};
#endif
@@ -477,8 +489,8 @@ struct rte_flow_item_ipv4 {
#ifndef __cplusplus
static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
.hdr = {
- .src_addr = 0xffffffff,
- .dst_addr = 0xffffffff,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
},
};
#endif
@@ -540,8 +552,8 @@ struct rte_flow_item_udp {
#ifndef __cplusplus
static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
.hdr = {
- .src_port = 0xffff,
- .dst_port = 0xffff,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
};
#endif
@@ -559,8 +571,8 @@ struct rte_flow_item_tcp {
#ifndef __cplusplus
static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
.hdr = {
- .src_port = 0xffff,
- .dst_port = 0xffff,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
};
#endif
@@ -578,8 +590,8 @@ struct rte_flow_item_sctp {
#ifndef __cplusplus
static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
.hdr = {
- .src_port = 0xffff,
- .dst_port = 0xffff,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
};
#endif
@@ -609,14 +621,14 @@ static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
* Matches a E-tag header.
*/
struct rte_flow_item_e_tag {
- uint16_t tpid; /**< Tag protocol identifier (0x893F). */
+ rte_be16_t tpid; /**< Tag protocol identifier (0x893F). */
/**
* E-Tag control information (E-TCI).
* E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
*/
- uint16_t epcp_edei_in_ecid_b;
+ rte_be16_t epcp_edei_in_ecid_b;
/** Reserved (2b), GRP (2b), E-CID base (12b). */
- uint16_t rsvd_grp_ecid_b;
+ rte_be16_t rsvd_grp_ecid_b;
uint8_t in_ecid_e; /**< Ingress E-CID ext. */
uint8_t ecid_e; /**< E-CID ext. */
};
@@ -624,13 +636,7 @@ struct rte_flow_item_e_tag {
/** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
#ifndef __cplusplus
static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- .rsvd_grp_ecid_b = 0x3fff,
-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- .rsvd_grp_ecid_b = 0xff3f,
-#else
-#error Unsupported endianness.
-#endif
+ .rsvd_grp_ecid_b = RTE_BE16(0x3fff),
};
#endif
@@ -646,8 +652,8 @@ struct rte_flow_item_nvgre {
*
* c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
*/
- uint16_t c_k_s_rsvd0_ver;
- uint16_t protocol; /**< Protocol type (0x6558). */
+ rte_be16_t c_k_s_rsvd0_ver;
+ rte_be16_t protocol; /**< Protocol type (0x6558). */
uint8_t tni[3]; /**< Virtual subnet ID. */
uint8_t flow_id; /**< Flow ID. */
};
@@ -689,14 +695,42 @@ struct rte_flow_item_gre {
* Checksum (1b), reserved 0 (12b), version (3b).
* Refer to RFC 2784.
*/
- uint16_t c_rsvd0_ver;
- uint16_t protocol; /**< Protocol type. */
+ rte_be16_t c_rsvd0_ver;
+ rte_be16_t protocol; /**< Protocol type. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_GRE. */
#ifndef __cplusplus
static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
- .protocol = 0xffff,
+ .protocol = RTE_BE16(0xffff),
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_FUZZY
+ *
+ * Fuzzy pattern match, expect faster than default.
+ *
+ * This is for device that support fuzzy match option.
+ * Usually a fuzzy match is fast but the cost is accuracy.
+ * i.e. Signature Match only match pattern's hash value, but it is
+ * possible two different patterns have the same hash value.
+ *
+ * Matching accuracy level can be configure by threshold.
+ * Driver can divide the range of threshold and map to different
+ * accuracy levels that device support.
+ *
+ * Threshold 0 means perfect match (no fuzziness), while threshold
+ * 0xffffffff means fuzziest match.
+ */
+struct rte_flow_item_fuzzy {
+ uint32_t thresh; /**< Accuracy threshold. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_FUZZY. */
+#ifndef __cplusplus
+static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
+ .thresh = 0xffffffff,
};
#endif
@@ -1191,6 +1225,90 @@ rte_flow_query(uint8_t port_id,
void *data,
struct rte_flow_error *error);
+/**
+ * Restrict ingress traffic to the defined flow rules.
+ *
+ * Isolated mode guarantees that all ingress traffic comes from defined flow
+ * rules only (current and future).
+ *
+ * Besides making ingress more deterministic, it allows PMDs to safely reuse
+ * resources otherwise assigned to handle the remaining traffic, such as
+ * global RSS configuration settings, VLAN filters, MAC address entries,
+ * legacy filter API rules and so on in order to expand the set of possible
+ * flow rule types.
+ *
+ * Calling this function as soon as possible after device initialization,
+ * ideally before the first call to rte_eth_dev_configure(), is recommended
+ * to avoid possible failures due to conflicting settings.
+ *
+ * Once effective, leaving isolated mode may not be possible depending on
+ * PMD implementation.
+ *
+ * Additionally, the following functionality has no effect on the underlying
+ * port and may return errors such as ENOTSUP ("not supported"):
+ *
+ * - Toggling promiscuous mode.
+ * - Toggling allmulticast mode.
+ * - Configuring MAC addresses.
+ * - Configuring multicast addresses.
+ * - Configuring VLAN filters.
+ * - Configuring Rx filters through the legacy API (e.g. FDIR).
+ * - Configuring global RSS settings.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param set
+ * Nonzero to enter isolated mode, attempt to leave it otherwise.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rte_flow_isolate(uint8_t port_id, int set, struct rte_flow_error *error);
+
+/**
+ * Generic flow representation.
+ *
+ * This form is sufficient to describe an rte_flow independently from any
+ * PMD implementation and allows for replayability and identification.
+ */
+struct rte_flow_desc {
+ size_t size; /**< Allocated space including data[]. */
+ struct rte_flow_attr attr; /**< Attributes. */
+ struct rte_flow_item *items; /**< Items. */
+ struct rte_flow_action *actions; /**< Actions. */
+ uint8_t data[]; /**< Storage for items/actions. */
+};
+
+/**
+ * Copy an rte_flow rule description.
+ *
+ * @param[in] fd
+ * Flow rule description.
+ * @param[in] len
+ * Total size of allocated data for the flow description.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ *
+ * @return
+ * If len is greater or equal to the size of the flow, the total size of the
+ * flow description and its data.
+ * If len is lower than the size of the flow, the number of bytes that would
+ * have been written to desc had it been sufficient. Nothing is written.
+ */
+size_t
+rte_flow_copy(struct rte_flow_desc *fd, size_t len,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h
index da5749d5..4d95391d 100644
--- a/lib/librte_ether/rte_flow_driver.h
+++ b/lib/librte_ether/rte_flow_driver.h
@@ -120,6 +120,11 @@ struct rte_flow_ops {
enum rte_flow_action_type,
void *,
struct rte_flow_error *);
+ /** See rte_flow_isolate(). */
+ int (*isolate)
+ (struct rte_eth_dev *,
+ int,
+ struct rte_flow_error *);
};
/**
diff --git a/lib/librte_ether/rte_tm.c b/lib/librte_ether/rte_tm.c
new file mode 100644
index 00000000..71679650
--- /dev/null
+++ b/lib/librte_ether/rte_tm.c
@@ -0,0 +1,438 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_tm_driver.h"
+#include "rte_tm.h"
+
+/* Get generic traffic manager operations structure from a port. */
+const struct rte_tm_ops *
+rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_tm_ops *ops;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ rte_tm_error_set(error,
+ ENODEV,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+
+ if ((dev->dev_ops->tm_ops_get == NULL) ||
+ (dev->dev_ops->tm_ops_get(dev, &ops) != 0) ||
+ (ops == NULL)) {
+ rte_tm_error_set(error,
+ ENOSYS,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+ }
+
+ return ops;
+}
+
+#define RTE_TM_FUNC(port_id, func) \
+({ \
+ const struct rte_tm_ops *ops = \
+ rte_tm_ops_get(port_id, error); \
+ if (ops == NULL) \
+ return -rte_errno; \
+ \
+ if (ops->func == NULL) \
+ return -rte_tm_error_set(error, \
+ ENOSYS, \
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, \
+ NULL, \
+ rte_strerror(ENOSYS)); \
+ \
+ ops->func; \
+})
+
+/* Get number of leaf nodes */
+int
+rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+ uint32_t *n_leaf_nodes,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_tm_ops *ops =
+ rte_tm_ops_get(port_id, error);
+
+ if (ops == NULL)
+ return -rte_errno;
+
+ if (n_leaf_nodes == NULL) {
+ rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return -rte_errno;
+ }
+
+ *n_leaf_nodes = dev->data->nb_tx_queues;
+ return 0;
+}
+
+/* Check node type (leaf or non-leaf) */
+int
+rte_tm_node_type_get(uint8_t port_id,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_type_get)(dev,
+ node_id, is_leaf, error);
+}
+
+/* Get capabilities */
+int rte_tm_capabilities_get(uint8_t port_id,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, capabilities_get)(dev,
+ cap, error);
+}
+
+/* Get level capabilities */
+int rte_tm_level_capabilities_get(uint8_t port_id,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, level_capabilities_get)(dev,
+ level_id, cap, error);
+}
+
+/* Get node capabilities */
+int rte_tm_node_capabilities_get(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_capabilities_get)(dev,
+ node_id, cap, error);
+}
+
+/* Add WRED profile */
+int rte_tm_wred_profile_add(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, wred_profile_add)(dev,
+ wred_profile_id, profile, error);
+}
+
+/* Delete WRED profile */
+int rte_tm_wred_profile_delete(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, wred_profile_delete)(dev,
+ wred_profile_id, error);
+}
+
+/* Add/update shared WRED context */
+int rte_tm_shared_wred_context_add_update(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_wred_context_add_update)(dev,
+ shared_wred_context_id, wred_profile_id, error);
+}
+
+/* Delete shared WRED context */
+int rte_tm_shared_wred_context_delete(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_wred_context_delete)(dev,
+ shared_wred_context_id, error);
+}
+
+/* Add shaper profile */
+int rte_tm_shaper_profile_add(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shaper_profile_add)(dev,
+ shaper_profile_id, profile, error);
+}
+
+/* Delete WRED profile */
+int rte_tm_shaper_profile_delete(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shaper_profile_delete)(dev,
+ shaper_profile_id, error);
+}
+
+/* Add shared shaper */
+int rte_tm_shared_shaper_add_update(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_shaper_add_update)(dev,
+ shared_shaper_id, shaper_profile_id, error);
+}
+
+/* Delete shared shaper */
+int rte_tm_shared_shaper_delete(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, shared_shaper_delete)(dev,
+ shared_shaper_id, error);
+}
+
+/* Add node to port traffic manager hierarchy */
+int rte_tm_node_add(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_add)(dev,
+ node_id, parent_node_id, priority, weight, level_id,
+ params, error);
+}
+
+/* Delete node from traffic manager hierarchy */
+int rte_tm_node_delete(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_delete)(dev,
+ node_id, error);
+}
+
+/* Suspend node */
+int rte_tm_node_suspend(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_suspend)(dev,
+ node_id, error);
+}
+
+/* Resume node */
+int rte_tm_node_resume(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_resume)(dev,
+ node_id, error);
+}
+
+/* Commit the initial port traffic manager hierarchy */
+int rte_tm_hierarchy_commit(uint8_t port_id,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, hierarchy_commit)(dev,
+ clear_on_fail, error);
+}
+
+/* Update node parent */
+int rte_tm_node_parent_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_parent_update)(dev,
+ node_id, parent_node_id, priority, weight, error);
+}
+
+/* Update node private shaper */
+int rte_tm_node_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_shaper_update)(dev,
+ node_id, shaper_profile_id, error);
+}
+
+/* Update node shared shapers */
+int rte_tm_node_shared_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_shaper_id,
+ int add,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_shared_shaper_update)(dev,
+ node_id, shared_shaper_id, add, error);
+}
+
+/* Update node stats */
+int rte_tm_node_stats_update(uint8_t port_id,
+ uint32_t node_id,
+ uint64_t stats_mask,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_stats_update)(dev,
+ node_id, stats_mask, error);
+}
+
+/* Update WFQ weight mode */
+int rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+ uint32_t node_id,
+ int *wfq_weight_mode,
+ uint32_t n_sp_priorities,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_wfq_weight_mode_update)(dev,
+ node_id, wfq_weight_mode, n_sp_priorities, error);
+}
+
+/* Update node congestion management mode */
+int rte_tm_node_cman_update(uint8_t port_id,
+ uint32_t node_id,
+ enum rte_tm_cman_mode cman,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_cman_update)(dev,
+ node_id, cman, error);
+}
+
+/* Update node private WRED context */
+int rte_tm_node_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_wred_context_update)(dev,
+ node_id, wred_profile_id, error);
+}
+
+/* Update node shared WRED context */
+int rte_tm_node_shared_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_wred_context_id,
+ int add,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_shared_wred_context_update)(dev,
+ node_id, shared_wred_context_id, add, error);
+}
+
+/* Read and/or clear stats counters for specific node */
+int rte_tm_node_stats_read(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, node_stats_read)(dev,
+ node_id, stats, stats_mask, clear, error);
+}
+
+/* Packet marking - VLAN DEI */
+int rte_tm_mark_vlan_dei(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, mark_vlan_dei)(dev,
+ mark_green, mark_yellow, mark_red, error);
+}
+
+/* Packet marking - IPv4/IPv6 ECN */
+int rte_tm_mark_ip_ecn(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, mark_ip_ecn)(dev,
+ mark_green, mark_yellow, mark_red, error);
+}
+
+/* Packet marking - IPv4/IPv6 DSCP */
+int rte_tm_mark_ip_dscp(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_TM_FUNC(port_id, mark_ip_dscp)(dev,
+ mark_green, mark_yellow, mark_red, error);
+}
diff --git a/lib/librte_ether/rte_tm.h b/lib/librte_ether/rte_tm.h
new file mode 100644
index 00000000..ebbfa1ee
--- /dev/null
+++ b/lib/librte_ether/rte_tm.h
@@ -0,0 +1,1912 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 Cavium.
+ * Copyright(c) 2017 NXP.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TM_H__
+#define __INCLUDE_RTE_TM_H__
+
+/**
+ * @file
+ * RTE Generic Traffic Manager API
+ *
+ * This interface provides the ability to configure the traffic manager in a
+ * generic way. It includes features such as: hierarchical scheduling,
+ * traffic shaping, congestion management, packet marking, etc.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Ethernet framing overhead.
+ *
+ * Overhead fields per Ethernet frame:
+ * 1. Preamble: 7 bytes;
+ * 2. Start of Frame Delimiter (SFD): 1 byte;
+ * 3. Inter-Frame Gap (IFG): 12 bytes.
+ *
+ * One of the typical values for the *pkt_length_adjust* field of the shaper
+ * profile.
+ *
+ * @see struct rte_tm_shaper_params
+ */
+#define RTE_TM_ETH_FRAMING_OVERHEAD 20
+
+/**
+ * Ethernet framing overhead including the Frame Check Sequence (FCS) field.
+ * Useful when FCS is generated and added at the end of the Ethernet frame on
+ * TX side without any SW intervention.
+ *
+ * One of the typical values for the pkt_length_adjust field of the shaper
+ * profile.
+ *
+ * @see struct rte_tm_shaper_params
+ */
+#define RTE_TM_ETH_FRAMING_OVERHEAD_FCS 24
+
+/**
+ * Invalid WRED profile ID.
+ *
+ * @see struct rte_tm_node_params
+ * @see rte_tm_node_add()
+ * @see rte_tm_node_wred_context_update()
+ */
+#define RTE_TM_WRED_PROFILE_ID_NONE UINT32_MAX
+
+/**
+ * Invalid shaper profile ID.
+ *
+ * @see struct rte_tm_node_params
+ * @see rte_tm_node_add()
+ * @see rte_tm_node_shaper_update()
+ */
+#define RTE_TM_SHAPER_PROFILE_ID_NONE UINT32_MAX
+
+/**
+ * Node ID for the parent of the root node.
+ *
+ * @see rte_tm_node_add()
+ */
+#define RTE_TM_NODE_ID_NULL UINT32_MAX
+
+/**
+ * Node level ID used to disable level ID checking.
+ *
+ * @see rte_tm_node_add()
+ */
+#define RTE_TM_NODE_LEVEL_ID_ANY UINT32_MAX
+
+/**
+ * Color
+ */
+enum rte_tm_color {
+ RTE_TM_GREEN = 0, /**< Green */
+ RTE_TM_YELLOW, /**< Yellow */
+ RTE_TM_RED, /**< Red */
+ RTE_TM_COLORS /**< Number of colors */
+};
+
+/**
+ * Node statistics counter type
+ */
+enum rte_tm_stats_type {
+ /** Number of packets scheduled from current node. */
+ RTE_TM_STATS_N_PKTS = 1 << 0,
+
+ /** Number of bytes scheduled from current node. */
+ RTE_TM_STATS_N_BYTES = 1 << 1,
+
+ /** Number of green packets dropped by current leaf node. */
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED = 1 << 2,
+
+ /** Number of yellow packets dropped by current leaf node. */
+ RTE_TM_STATS_N_PKTS_YELLOW_DROPPED = 1 << 3,
+
+ /** Number of red packets dropped by current leaf node. */
+ RTE_TM_STATS_N_PKTS_RED_DROPPED = 1 << 4,
+
+ /** Number of green bytes dropped by current leaf node. */
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED = 1 << 5,
+
+ /** Number of yellow bytes dropped by current leaf node. */
+ RTE_TM_STATS_N_BYTES_YELLOW_DROPPED = 1 << 6,
+
+ /** Number of red bytes dropped by current leaf node. */
+ RTE_TM_STATS_N_BYTES_RED_DROPPED = 1 << 7,
+
+ /** Number of packets currently waiting in the packet queue of current
+ * leaf node.
+ */
+ RTE_TM_STATS_N_PKTS_QUEUED = 1 << 8,
+
+ /** Number of bytes currently waiting in the packet queue of current
+ * leaf node.
+ */
+ RTE_TM_STATS_N_BYTES_QUEUED = 1 << 9,
+};
+
+/**
+ * Node statistics counters
+ */
+struct rte_tm_node_stats {
+ /** Number of packets scheduled from current node. */
+ uint64_t n_pkts;
+
+ /** Number of bytes scheduled from current node. */
+ uint64_t n_bytes;
+
+ /** Statistics counters for leaf nodes only. */
+ struct {
+ /** Number of packets dropped by current leaf node per each
+ * color.
+ */
+ uint64_t n_pkts_dropped[RTE_TM_COLORS];
+
+ /** Number of bytes dropped by current leaf node per each
+ * color.
+ */
+ uint64_t n_bytes_dropped[RTE_TM_COLORS];
+
+ /** Number of packets currently waiting in the packet queue of
+ * current leaf node.
+ */
+ uint64_t n_pkts_queued;
+
+ /** Number of bytes currently waiting in the packet queue of
+ * current leaf node.
+ */
+ uint64_t n_bytes_queued;
+ } leaf;
+};
+
+/**
+ * Traffic manager dynamic updates
+ */
+enum rte_tm_dynamic_update_type {
+ /** Dynamic parent node update. The new parent node is located on same
+ * hierarchy level as the former parent node. Consequently, the node
+ * whose parent is changed preserves its hierarchy level.
+ */
+ RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL = 1 << 0,
+
+ /** Dynamic parent node update. The new parent node is located on
+ * different hierarchy level than the former parent node. Consequently,
+ * the node whose parent is changed also changes its hierarchy level.
+ */
+ RTE_TM_UPDATE_NODE_PARENT_CHANGE_LEVEL = 1 << 1,
+
+ /** Dynamic node add/delete. */
+ RTE_TM_UPDATE_NODE_ADD_DELETE = 1 << 2,
+
+ /** Suspend/resume nodes. */
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME = 1 << 3,
+
+ /** Dynamic switch between byte-based and packet-based WFQ weights. */
+ RTE_TM_UPDATE_NODE_WFQ_WEIGHT_MODE = 1 << 4,
+
+ /** Dynamic update on number of SP priorities. */
+ RTE_TM_UPDATE_NODE_N_SP_PRIORITIES = 1 << 5,
+
+ /** Dynamic update of congestion management mode for leaf nodes. */
+ RTE_TM_UPDATE_NODE_CMAN = 1 << 6,
+
+ /** Dynamic update of the set of enabled stats counter types. */
+ RTE_TM_UPDATE_NODE_STATS = 1 << 7,
+};
+
+/**
+ * Traffic manager capabilities
+ */
+struct rte_tm_capabilities {
+ /** Maximum number of nodes. */
+ uint32_t n_nodes_max;
+
+ /** Maximum number of levels (i.e. number of nodes connecting the root
+ * node with any leaf node, including the root and the leaf).
+ */
+ uint32_t n_levels_max;
+
+ /** When non-zero, this flag indicates that all the non-leaf nodes
+ * (with the exception of the root node) have identical capability set.
+ */
+ int non_leaf_nodes_identical;
+
+ /** When non-zero, this flag indicates that all the leaf nodes have
+ * identical capability set.
+ */
+ int leaf_nodes_identical;
+
+ /** Maximum number of shapers, either private or shared. In case the
+ * implementation does not share any resources between private and
+ * shared shapers, it is typically equal to the sum of
+ * *shaper_private_n_max* and *shaper_shared_n_max*. The
+ * value of zero indicates that traffic shaping is not supported.
+ */
+ uint32_t shaper_n_max;
+
+ /** Maximum number of private shapers. Indicates the maximum number of
+ * nodes that can concurrently have their private shaper enabled. The
+ * value of zero indicates that private shapers are not supported.
+ */
+ uint32_t shaper_private_n_max;
+
+ /** Maximum number of private shapers that support dual rate shaping.
+ * Indicates the maximum number of nodes that can concurrently have
+ * their private shaper enabled with dual rate support. Only valid when
+ * private shapers are supported. The value of zero indicates that dual
+ * rate shaping is not available for private shapers. The maximum value
+ * is *shaper_private_n_max*.
+ */
+ int shaper_private_dual_rate_n_max;
+
+ /** Minimum committed/peak rate (bytes per second) for any private
+ * shaper. Valid only when private shapers are supported.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for any private
+ * shaper. Valid only when private shapers are supported.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers. The value of zero indicates that
+ * shared shapers are not supported.
+ */
+ uint32_t shaper_shared_n_max;
+
+ /** Maximum number of nodes that can share the same shared shaper.
+ * Only valid when shared shapers are supported.
+ */
+ uint32_t shaper_shared_n_nodes_per_shaper_max;
+
+ /** Maximum number of shared shapers a node can be part of. This
+ * parameter indicates that there is at least one node that can be
+ * configured with this many shared shapers, which might not be true for
+ * all the nodes. Only valid when shared shapers are supported, in which
+ * case it ranges from 1 to *shaper_shared_n_max*.
+ */
+ uint32_t shaper_shared_n_shapers_per_node_max;
+
+ /** Maximum number of shared shapers that can be configured with dual
+ * rate shaping. The value of zero indicates that dual rate shaping
+ * support is not available for shared shapers.
+ */
+ uint32_t shaper_shared_dual_rate_n_max;
+
+ /** Minimum committed/peak rate (bytes per second) for any shared
+ * shaper. Only valid when shared shapers are supported.
+ */
+ uint64_t shaper_shared_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for any shared
+ * shaper. Only valid when shared shapers are supported.
+ */
+ uint64_t shaper_shared_rate_max;
+
+ /** Minimum value allowed for packet length adjustment for any private
+ * or shared shaper.
+ */
+ int shaper_pkt_length_adjust_min;
+
+ /** Maximum value allowed for packet length adjustment for any private
+ * or shared shaper.
+ */
+ int shaper_pkt_length_adjust_max;
+
+ /** Maximum number of children nodes. This parameter indicates that
+ * there is at least one non-leaf node that can be configured with this
+ * many children nodes, which might not be true for all the non-leaf
+ * nodes.
+ */
+ uint32_t sched_n_children_max;
+
+ /** Maximum number of supported priority levels. This parameter
+ * indicates that there is at least one non-leaf node that can be
+ * configured with this many priority levels for managing its children
+ * nodes, which might not be true for all the non-leaf nodes. The value
+ * of zero is invalid. The value of 1 indicates that only priority 0 is
+ * supported, which essentially means that Strict Priority (SP)
+ * algorithm is not supported.
+ */
+ uint32_t sched_sp_n_priorities_max;
+
+ /** Maximum number of sibling nodes that can have the same priority at
+ * any given time, i.e. maximum size of the WFQ sibling node group. This
+ * parameter indicates there is at least one non-leaf node that meets
+ * this condition, which might not be true for all the non-leaf nodes.
+ * The value of zero is invalid. The value of 1 indicates that WFQ
+ * algorithm is not supported. The maximum value is
+ * *sched_n_children_max*.
+ */
+ uint32_t sched_wfq_n_children_per_group_max;
+
+ /** Maximum number of priority levels that can have more than one child
+ * node at any given time, i.e. maximum number of WFQ sibling node
+ * groups that have two or more members. This parameter indicates there
+ * is at least one non-leaf node that meets this condition, which might
+ * not be true for all the non-leaf nodes. The value of zero states that
+ * WFQ algorithm is not supported. The value of 1 indicates that
+ * (*sched_sp_n_priorities_max* - 1) priority levels have at most one
+ * child node, so there can be only one priority level with two or
+ * more sibling nodes making up a WFQ group. The maximum value is:
+ * min(floor(*sched_n_children_max* / 2), *sched_sp_n_priorities_max*).
+ */
+ uint32_t sched_wfq_n_groups_max;
+
+ /** Maximum WFQ weight. The value of 1 indicates that all sibling nodes
+ * with same priority have the same WFQ weight, so WFQ is reduced to FQ.
+ */
+ uint32_t sched_wfq_weight_max;
+
+ /** Head drop algorithm support. When non-zero, this parameter
+ * indicates that there is at least one leaf node that supports the head
+ * drop algorithm, which might not be true for all the leaf nodes.
+ */
+ int cman_head_drop_supported;
+
+ /** Maximum number of WRED contexts, either private or shared. In case
+ * the implementation does not share any resources between private and
+ * shared WRED contexts, it is typically equal to the sum of
+ * *cman_wred_context_private_n_max* and
+ * *cman_wred_context_shared_n_max*. The value of zero indicates that
+ * WRED is not supported.
+ */
+ uint32_t cman_wred_context_n_max;
+
+ /** Maximum number of private WRED contexts. Indicates the maximum
+ * number of leaf nodes that can concurrently have their private WRED
+ * context enabled. The value of zero indicates that private WRED
+ * contexts are not supported.
+ */
+ uint32_t cman_wred_context_private_n_max;
+
+ /** Maximum number of shared WRED contexts. The value of zero
+ * indicates that shared WRED contexts are not supported.
+ */
+ uint32_t cman_wred_context_shared_n_max;
+
+ /** Maximum number of leaf nodes that can share the same WRED context.
+ * Only valid when shared WRED contexts are supported.
+ */
+ uint32_t cman_wred_context_shared_n_nodes_per_context_max;
+
+ /** Maximum number of shared WRED contexts a leaf node can be part of.
+ * This parameter indicates that there is at least one leaf node that
+ * can be configured with this many shared WRED contexts, which might
+ * not be true for all the leaf nodes. Only valid when shared WRED
+ * contexts are supported, in which case it ranges from 1 to
+ * *cman_wred_context_shared_n_max*.
+ */
+ uint32_t cman_wred_context_shared_n_contexts_per_node_max;
+
+ /** Support for VLAN DEI packet marking (per color). */
+ int mark_vlan_dei_supported[RTE_TM_COLORS];
+
+ /** Support for IPv4/IPv6 ECN marking of TCP packets (per color). */
+ int mark_ip_ecn_tcp_supported[RTE_TM_COLORS];
+
+ /** Support for IPv4/IPv6 ECN marking of SCTP packets (per color). */
+ int mark_ip_ecn_sctp_supported[RTE_TM_COLORS];
+
+ /** Support for IPv4/IPv6 DSCP packet marking (per color). */
+ int mark_ip_dscp_supported[RTE_TM_COLORS];
+
+ /** Set of supported dynamic update operations.
+ * @see enum rte_tm_dynamic_update_type
+ */
+ uint64_t dynamic_update_mask;
+
+ /** Set of supported statistics counter types.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+};
+
+/**
+ * Traffic manager level capabilities
+ */
+struct rte_tm_level_capabilities {
+ /** Maximum number of nodes for the current hierarchy level. */
+ uint32_t n_nodes_max;
+
+ /** Maximum number of non-leaf nodes for the current hierarchy level.
+ * The value of 0 indicates that current level only supports leaf
+ * nodes. The maximum value is *n_nodes_max*.
+ */
+ uint32_t n_nodes_nonleaf_max;
+
+ /** Maximum number of leaf nodes for the current hierarchy level. The
+ * value of 0 indicates that current level only supports non-leaf
+ * nodes. The maximum value is *n_nodes_max*.
+ */
+ uint32_t n_nodes_leaf_max;
+
+ /** When non-zero, this flag indicates that all the non-leaf nodes on
+ * this level have identical capability set. Valid only when
+ * *n_nodes_nonleaf_max* is non-zero.
+ */
+ int non_leaf_nodes_identical;
+
+ /** When non-zero, this flag indicates that all the leaf nodes on this
+ * level have identical capability set. Valid only when
+ * *n_nodes_leaf_max* is non-zero.
+ */
+ int leaf_nodes_identical;
+
+ RTE_STD_C11
+ union {
+ /** Items valid only for the non-leaf nodes on this level. */
+ struct {
+ /** Private shaper support. When non-zero, it indicates
+ * there is at least one non-leaf node on this level
+ * with private shaper support, which may not be the
+ * case for all the non-leaf nodes on this level.
+ */
+ int shaper_private_supported;
+
+ /** Dual rate support for private shaper. Valid only
+ * when private shaper is supported for the non-leaf
+ * nodes on the current level. When non-zero, it
+ * indicates there is at least one non-leaf node on this
+ * level with dual rate private shaper support, which
+ * may not be the case for all the non-leaf nodes on
+ * this level.
+ */
+ int shaper_private_dual_rate_supported;
+
+ /** Minimum committed/peak rate (bytes per second) for
+ * private shapers of the non-leaf nodes of this level.
+ * Valid only when private shaper is supported on this
+ * level.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for
+ * private shapers of the non-leaf nodes on this level.
+ * Valid only when private shaper is supported on this
+ * level.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers that any non-leaf
+ * node on this level can be part of. The value of zero
+ * indicates that shared shapers are not supported by
+ * the non-leaf nodes on this level. When non-zero, it
+ * indicates there is at least one non-leaf node on this
+ * level that meets this condition, which may not be the
+ * case for all the non-leaf nodes on this level.
+ */
+ uint32_t shaper_shared_n_max;
+
+ /** Maximum number of children nodes. This parameter
+ * indicates that there is at least one non-leaf node on
+ * this level that can be configured with this many
+ * children nodes, which might not be true for all the
+ * non-leaf nodes on this level.
+ */
+ uint32_t sched_n_children_max;
+
+ /** Maximum number of supported priority levels. This
+ * parameter indicates that there is at least one
+ * non-leaf node on this level that can be configured
+ * with this many priority levels for managing its
+ * children nodes, which might not be true for all the
+ * non-leaf nodes on this level. The value of zero is
+ * invalid. The value of 1 indicates that only priority
+ * 0 is supported, which essentially means that Strict
+ * Priority (SP) algorithm is not supported on this
+ * level.
+ */
+ uint32_t sched_sp_n_priorities_max;
+
+ /** Maximum number of sibling nodes that can have the
+ * same priority at any given time, i.e. maximum size of
+ * the WFQ sibling node group. This parameter indicates
+ * there is at least one non-leaf node on this level
+ * that meets this condition, which may not be true for
+ * all the non-leaf nodes on this level. The value of
+ * zero is invalid. The value of 1 indicates that WFQ
+ * algorithm is not supported on this level. The maximum
+ * value is *sched_n_children_max*.
+ */
+ uint32_t sched_wfq_n_children_per_group_max;
+
+ /** Maximum number of priority levels that can have
+ * more than one child node at any given time, i.e.
+ * maximum number of WFQ sibling node groups that
+ * have two or more members. This parameter indicates
+ * there is at least one non-leaf node on this level
+ * that meets this condition, which might not be true
+ * for all the non-leaf nodes. The value of zero states
+ * that WFQ algorithm is not supported on this level.
+ * The value of 1 indicates that
+ * (*sched_sp_n_priorities_max* - 1) priority levels on
+ * this level have at most one child node, so there can
+ * be only one priority level with two or more sibling
+ * nodes making up a WFQ group on this level. The
+ * maximum value is:
+ * min(floor(*sched_n_children_max* / 2),
+ * *sched_sp_n_priorities_max*).
+ */
+ uint32_t sched_wfq_n_groups_max;
+
+ /** Maximum WFQ weight. The value of 1 indicates that
+ * all sibling nodes on this level with same priority
+ * have the same WFQ weight, so on this level WFQ is
+ * reduced to FQ.
+ */
+ uint32_t sched_wfq_weight_max;
+
+ /** Mask of statistics counter types supported by the
+ * non-leaf nodes on this level. Every supported
+ * statistics counter type is supported by at least one
+ * non-leaf node on this level, which may not be true
+ * for all the non-leaf nodes on this level.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+ } nonleaf;
+
+ /** Items valid only for the leaf nodes on this level. */
+ struct {
+ /** Private shaper support. When non-zero, it indicates
+ * there is at least one leaf node on this level with
+ * private shaper support, which may not be the case for
+ * all the leaf nodes on this level.
+ */
+ int shaper_private_supported;
+
+ /** Dual rate support for private shaper. Valid only
+ * when private shaper is supported for the leaf nodes
+ * on this level. When non-zero, it indicates there is
+ * at least one leaf node on this level with dual rate
+ * private shaper support, which may not be the case for
+ * all the leaf nodes on this level.
+ */
+ int shaper_private_dual_rate_supported;
+
+ /** Minimum committed/peak rate (bytes per second) for
+ * private shapers of the leaf nodes of this level.
+ * Valid only when private shaper is supported for the
+ * leaf nodes on this level.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for
+ * private shapers of the leaf nodes on this level.
+ * Valid only when private shaper is supported for the
+ * leaf nodes on this level.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers that any leaf node
+ * on this level can be part of. The value of zero
+ * indicates that shared shapers are not supported by
+ * the leaf nodes on this level. When non-zero, it
+ * indicates there is at least one leaf node on this
+ * level that meets this condition, which may not be the
+ * case for all the leaf nodes on this level.
+ */
+ uint32_t shaper_shared_n_max;
+
+ /** Head drop algorithm support. When non-zero, this
+ * parameter indicates that there is at least one leaf
+ * node on this level that supports the head drop
+ * algorithm, which might not be true for all the leaf
+ * nodes on this level.
+ */
+ int cman_head_drop_supported;
+
+ /** Private WRED context support. When non-zero, it
+ * indicates there is at least one node on this level
+ * with private WRED context support, which may not be
+ * true for all the leaf nodes on this level.
+ */
+ int cman_wred_context_private_supported;
+
+ /** Maximum number of shared WRED contexts that any
+ * leaf node on this level can be part of. The value of
+ * zero indicates that shared WRED contexts are not
+ * supported by the leaf nodes on this level. When
+ * non-zero, it indicates there is at least one leaf
+ * node on this level that meets this condition, which
+ * may not be the case for all the leaf nodes on this
+ * level.
+ */
+ uint32_t cman_wred_context_shared_n_max;
+
+ /** Mask of statistics counter types supported by the
+ * leaf nodes on this level. Every supported statistics
+ * counter type is supported by at least one leaf node
+ * on this level, which may not be true for all the leaf
+ * nodes on this level.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+ } leaf;
+ };
+};
+
+/**
+ * Traffic manager node capabilities
+ */
+struct rte_tm_node_capabilities {
+ /** Private shaper support for the current node. */
+ int shaper_private_supported;
+
+ /** Dual rate shaping support for private shaper of current node.
+ * Valid only when private shaper is supported by the current node.
+ */
+ int shaper_private_dual_rate_supported;
+
+ /** Minimum committed/peak rate (bytes per second) for private
+ * shaper of current node. Valid only when private shaper is supported
+ * by the current node.
+ */
+ uint64_t shaper_private_rate_min;
+
+ /** Maximum committed/peak rate (bytes per second) for private
+ * shaper of current node. Valid only when private shaper is supported
+ * by the current node.
+ */
+ uint64_t shaper_private_rate_max;
+
+ /** Maximum number of shared shapers the current node can be part of.
+ * The value of zero indicates that shared shapers are not supported by
+ * the current node.
+ */
+ uint32_t shaper_shared_n_max;
+
+ RTE_STD_C11
+ union {
+ /** Items valid only for non-leaf nodes. */
+ struct {
+ /** Maximum number of children nodes. */
+ uint32_t sched_n_children_max;
+
+ /** Maximum number of supported priority levels. The
+ * value of zero is invalid. The value of 1 indicates
+ * that only priority 0 is supported, which essentially
+ * means that Strict Priority (SP) algorithm is not
+ * supported.
+ */
+ uint32_t sched_sp_n_priorities_max;
+
+ /** Maximum number of sibling nodes that can have the
+ * same priority at any given time, i.e. maximum size
+ * of the WFQ sibling node group. The value of zero
+ * is invalid. The value of 1 indicates that WFQ
+ * algorithm is not supported. The maximum value is
+ * *sched_n_children_max*.
+ */
+ uint32_t sched_wfq_n_children_per_group_max;
+
+ /** Maximum number of priority levels that can have
+ * more than one child node at any given time, i.e.
+ * maximum number of WFQ sibling node groups that have
+ * two or more members. The value of zero states that
+ * WFQ algorithm is not supported. The value of 1
+ * indicates that (*sched_sp_n_priorities_max* - 1)
+ * priority levels have at most one child node, so there
+ * can be only one priority level with two or more
+ * sibling nodes making up a WFQ group. The maximum
+ * value is: min(floor(*sched_n_children_max* / 2),
+ * *sched_sp_n_priorities_max*).
+ */
+ uint32_t sched_wfq_n_groups_max;
+
+ /** Maximum WFQ weight. The value of 1 indicates that
+ * all sibling nodes with same priority have the same
+ * WFQ weight, so WFQ is reduced to FQ.
+ */
+ uint32_t sched_wfq_weight_max;
+ } nonleaf;
+
+ /** Items valid only for leaf nodes. */
+ struct {
+ /** Head drop algorithm support for current node. */
+ int cman_head_drop_supported;
+
+ /** Private WRED context support for current node. */
+ int cman_wred_context_private_supported;
+
+ /** Maximum number of shared WRED contexts the current
+ * node can be part of. The value of zero indicates that
+ * shared WRED contexts are not supported by the current
+ * node.
+ */
+ uint32_t cman_wred_context_shared_n_max;
+ } leaf;
+ };
+
+ /** Mask of statistics counter types supported by the current node.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+};
+
+/**
+ * Congestion management (CMAN) mode
+ *
+ * This is used for controlling the admission of packets into a packet queue or
+ * group of packet queues on congestion. On request of writing a new packet
+ * into the current queue while the queue is full, the *tail drop* algorithm
+ * drops the new packet while leaving the queue unmodified, as opposed to *head
+ * drop* algorithm, which drops the packet at the head of the queue (the oldest
+ * packet waiting in the queue) and admits the new packet at the tail of the
+ * queue.
+ *
+ * The *Random Early Detection (RED)* algorithm works by proactively dropping
+ * more and more input packets as the queue occupancy builds up. When the queue
+ * is full or almost full, RED effectively works as *tail drop*. The *Weighted
+ * RED* algorithm uses a separate set of RED thresholds for each packet color.
+ */
+enum rte_tm_cman_mode {
+ RTE_TM_CMAN_TAIL_DROP = 0, /**< Tail drop */
+ RTE_TM_CMAN_HEAD_DROP, /**< Head drop */
+ RTE_TM_CMAN_WRED, /**< Weighted Random Early Detection (WRED) */
+};
+
+/**
+ * Random Early Detection (RED) profile
+ */
+struct rte_tm_red_params {
+ /** Minimum queue threshold */
+ uint16_t min_th;
+
+ /** Maximum queue threshold */
+ uint16_t max_th;
+
+ /** Inverse of packet marking probability maximum value (maxp), i.e.
+ * maxp_inv = 1 / maxp
+ */
+ uint16_t maxp_inv;
+
+ /** Negated log2 of queue weight (wq), i.e. wq = 1 / (2 ^ wq_log2) */
+ uint16_t wq_log2;
+};
+
+/**
+ * Weighted RED (WRED) profile
+ *
+ * Multiple WRED contexts can share the same WRED profile. Each leaf node with
+ * WRED enabled as its congestion management mode has zero or one private WRED
+ * context (only one leaf node using it) and/or zero, one or several shared
+ * WRED contexts (multiple leaf nodes use the same WRED context). A private
+ * WRED context is used to perform congestion management for a single leaf
+ * node, while a shared WRED context is used to perform congestion management
+ * for a group of leaf nodes.
+ */
+struct rte_tm_wred_params {
+ /** One set of RED parameters per packet color */
+ struct rte_tm_red_params red_params[RTE_TM_COLORS];
+};
+
+/**
+ * Token bucket
+ */
+struct rte_tm_token_bucket {
+ /** Token bucket rate (bytes per second) */
+ uint64_t rate;
+
+ /** Token bucket size (bytes), a.k.a. max burst size */
+ uint64_t size;
+};
+
+/**
+ * Shaper (rate limiter) profile
+ *
+ * Multiple shaper instances can share the same shaper profile. Each node has
+ * zero or one private shaper (only one node using it) and/or zero, one or
+ * several shared shapers (multiple nodes use the same shaper instance).
+ * A private shaper is used to perform traffic shaping for a single node, while
+ * a shared shaper is used to perform traffic shaping for a group of nodes.
+ *
+ * Single rate shapers use a single token bucket. A single rate shaper can be
+ * configured by setting the rate of the committed bucket to zero, which
+ * effectively disables this bucket. The peak bucket is used to limit the rate
+ * and the burst size for the current shaper.
+ *
+ * Dual rate shapers use both the committed and the peak token buckets. The
+ * rate of the peak bucket has to be bigger than zero, as well as greater than
+ * or equal to the rate of the committed bucket.
+ */
+struct rte_tm_shaper_params {
+ /** Committed token bucket */
+ struct rte_tm_token_bucket committed;
+
+ /** Peak token bucket */
+ struct rte_tm_token_bucket peak;
+
+ /** Signed value to be added to the length of each packet for the
+ * purpose of shaping. Can be used to correct the packet length with
+ * the framing overhead bytes that are also consumed on the wire (e.g.
+ * RTE_TM_ETH_FRAMING_OVERHEAD_FCS).
+ */
+ int32_t pkt_length_adjust;
+};
+
+/**
+ * Node parameters
+ *
+ * Each non-leaf node has multiple inputs (its children nodes) and single output
+ * (which is input to its parent node). It arbitrates its inputs using Strict
+ * Priority (SP) and Weighted Fair Queuing (WFQ) algorithms to schedule input
+ * packets to its output while observing its shaping (rate limiting)
+ * constraints.
+ *
+ * Algorithms such as Weighted Round Robin (WRR), Byte-level WRR, Deficit WRR
+ * (DWRR), etc. are considered approximations of the WFQ ideal and are
+ * assimilated to WFQ, although an associated implementation-dependent trade-off
+ * on accuracy, performance and resource usage might exist.
+ *
+ * Children nodes with different priorities are scheduled using the SP algorithm
+ * based on their priority, with zero (0) as the highest priority. Children with
+ * the same priority are scheduled using the WFQ algorithm according to their
+ * weights. The WFQ weight of a given child node is relative to the sum of the
+ * weights of all its sibling nodes that have the same priority, with one (1) as
+ * the lowest weight. For each SP priority, the WFQ weight mode can be set as
+ * either byte-based or packet-based.
+ *
+ * Each leaf node sits on top of a TX queue of the current Ethernet port. Hence,
+ * the leaf nodes are predefined, with their node IDs set to 0 .. (N-1), where N
+ * is the number of TX queues configured for the current Ethernet port. The
+ * non-leaf nodes have their IDs generated by the application.
+ */
+struct rte_tm_node_params {
+ /** Shaper profile for the private shaper. The absence of the private
+ * shaper for the current node is indicated by setting this parameter
+ * to RTE_TM_SHAPER_PROFILE_ID_NONE.
+ */
+ uint32_t shaper_profile_id;
+
+ /** User allocated array of valid shared shaper IDs. */
+ uint32_t *shared_shaper_id;
+
+ /** Number of shared shaper IDs in the *shared_shaper_id* array. */
+ uint32_t n_shared_shapers;
+
+ RTE_STD_C11
+ union {
+ /** Parameters only valid for non-leaf nodes. */
+ struct {
+ /** WFQ weight mode for each SP priority. When NULL, it
+ * indicates that WFQ is to be used for all priorities.
+ * When non-NULL, it points to a pre-allocated array of
+ * *n_sp_priorities* values, with non-zero value for
+ * byte-mode and zero for packet-mode.
+ */
+ int *wfq_weight_mode;
+
+ /** Number of SP priorities. */
+ uint32_t n_sp_priorities;
+ } nonleaf;
+
+ /** Parameters only valid for leaf nodes. */
+ struct {
+ /** Congestion management mode */
+ enum rte_tm_cman_mode cman;
+
+ /** WRED parameters (only valid when *cman* is set to
+ * WRED).
+ */
+ struct {
+ /** WRED profile for private WRED context. The
+ * absence of a private WRED context for the
+ * current leaf node is indicated by value
+ * RTE_TM_WRED_PROFILE_ID_NONE.
+ */
+ uint32_t wred_profile_id;
+
+ /** User allocated array of shared WRED context
+ * IDs. When set to NULL, it indicates that the
+ * current leaf node should not currently be
+ * part of any shared WRED contexts.
+ */
+ uint32_t *shared_wred_context_id;
+
+ /** Number of elements in the
+ * *shared_wred_context_id* array. Only valid
+ * when *shared_wred_context_id* is non-NULL,
+ * in which case it should be non-zero.
+ */
+ uint32_t n_shared_wred_contexts;
+ } wred;
+ } leaf;
+ };
+
+ /** Mask of statistics counter types to be enabled for this node. This
+ * needs to be a subset of the statistics counter types available for
+ * the current node. Any statistics counter type not included in this
+ * set is to be disabled for the current node.
+ * @see enum rte_tm_stats_type
+ */
+ uint64_t stats_mask;
+};
+
+/**
+ * Verbose error types.
+ *
+ * Most of them provide the type of the object referenced by struct
+ * rte_tm_error::cause.
+ */
+enum rte_tm_error_type {
+ RTE_TM_ERROR_TYPE_NONE, /**< No error. */
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_GREEN,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_YELLOW,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_RED,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_SHARED_WRED_CONTEXT_ID,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+};
+
+/**
+ * Verbose error structure definition.
+ *
+ * This object is normally allocated by applications and set by PMDs, the
+ * message points to a constant string which does not need to be freed by
+ * the application, however its pointer can be considered valid only as long
+ * as its associated DPDK port remains configured. Closing the underlying
+ * device or unloading the PMD invalidates it.
+ *
+ * Both cause and message may be NULL regardless of the error type.
+ */
+struct rte_tm_error {
+ enum rte_tm_error_type type; /**< Cause field and error type. */
+ const void *cause; /**< Object responsible for the error. */
+ const char *message; /**< Human-readable error message. */
+};
+
+/**
+ * Traffic manager get number of leaf nodes
+ *
+ * Each leaf node sits on top of a TX queue of the current Ethernet port.
+ * Therefore, the set of leaf nodes is predefined, their number is always equal
+ * to N (where N is the number of TX queues configured for the current port)
+ * and their IDs are 0 .. (N-1).
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] n_leaf_nodes
+ * Number of leaf nodes for the current port.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+ uint32_t *n_leaf_nodes,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node ID validate and type (i.e. leaf or non-leaf) get
+ *
+ * The leaf nodes have predefined IDs in the range of 0 .. (N-1), where N is
+ * the number of TX queues of the current Ethernet port. The non-leaf nodes
+ * have their IDs generated by the application outside of the above range,
+ * which is reserved for leaf nodes.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID value. Needs to be valid.
+ * @param[out] is_leaf
+ * Set to non-zero value when node is leaf and to zero otherwise (non-leaf).
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_node_type_get(uint8_t port_id,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager capabilities get
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] cap
+ * Traffic manager capabilities. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_capabilities_get(uint8_t port_id,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager level capabilities get
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] level_id
+ * The hierarchy level identifier. The value of 0 identifies the level of the
+ * root node.
+ * @param[out] cap
+ * Traffic manager level capabilities. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_level_capabilities_get(uint8_t port_id,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node capabilities get
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] cap
+ * Traffic manager node capabilities. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_tm_node_capabilities_get(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager WRED profile add
+ *
+ * Create a new WRED profile with ID set to *wred_profile_id*. The new profile
+ * is used to create one or several WRED contexts.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] wred_profile_id
+ * WRED profile ID for the new profile. Needs to be unused.
+ * @param[in] profile
+ * WRED profile parameters. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_n_max
+ */
+int
+rte_tm_wred_profile_add(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager WRED profile delete
+ *
+ * Delete an existing WRED profile. This operation fails when there is
+ * currently at least one user (i.e. WRED context) of this WRED profile.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] wred_profile_id
+ *   WRED profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_n_max
+ */
+int
+rte_tm_wred_profile_delete(uint8_t port_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared WRED context add or update
+ *
+ * When *shared_wred_context_id* is invalid, a new WRED context with this ID is
+ * created by using the WRED profile identified by *wred_profile_id*.
+ *
+ * When *shared_wred_context_id* is valid, this WRED context is no longer using
+ * the profile previously assigned to it and is updated to use the profile
+ * identified by *wred_profile_id*.
+ *
+ * A valid shared WRED context can be assigned to several hierarchy leaf nodes
+ * configured to use WRED as the congestion management mode.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_wred_context_id
+ * Shared WRED context ID
+ * @param[in] wred_profile_id
+ *   WRED profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
+ */
+int
+rte_tm_shared_wred_context_add_update(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared WRED context delete
+ *
+ * Delete an existing shared WRED context. This operation fails when there is
+ * currently at least one user (i.e. hierarchy leaf node) of this shared WRED
+ * context.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_wred_context_id
+ *   Shared WRED context ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
+ */
+int
+rte_tm_shared_wred_context_delete(uint8_t port_id,
+ uint32_t shared_wred_context_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shaper profile add
+ *
+ * Create a new shaper profile with ID set to *shaper_profile_id*. The new
+ * shaper profile is used to create one or several shapers.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shaper_profile_id
+ * Shaper profile ID for the new profile. Needs to be unused.
+ * @param[in] profile
+ * Shaper profile parameters. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_n_max
+ */
+int
+rte_tm_shaper_profile_add(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shaper profile delete
+ *
+ * Delete an existing shaper profile. This operation fails when there is
+ * currently at least one user (i.e. shaper) of this shaper profile.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shaper_profile_id
+ *   Shaper profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_n_max
+ */
+int
+rte_tm_shaper_profile_delete(uint8_t port_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared shaper add or update
+ *
+ * When *shared_shaper_id* is not a valid shared shaper ID, a new shared shaper
+ * with this ID is created using the shaper profile identified by
+ * *shaper_profile_id*.
+ *
+ * When *shared_shaper_id* is a valid shared shaper ID, this shared shaper is
+ * no longer using the shaper profile previously assigned to it and is updated
+ * to use the shaper profile identified by *shaper_profile_id*.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_shaper_id
+ * Shared shaper ID
+ * @param[in] shaper_profile_id
+ *   Shaper profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_shared_n_max
+ */
+int
+rte_tm_shared_shaper_add_update(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager shared shaper delete
+ *
+ * Delete an existing shared shaper. This operation fails when there is
+ * currently at least one user (i.e. hierarchy node) of this shared shaper.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] shared_shaper_id
+ *   Shared shaper ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_shared_n_max
+ */
+int
+rte_tm_shared_shaper_delete(uint8_t port_id,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node add
+ *
+ * Create new node and connect it as child of an existing node. The new node is
+ * further identified by *node_id*, which needs to be unused by any of the
+ * existing nodes. The parent node is identified by *parent_node_id*, which
+ * needs to be the valid ID of an existing non-leaf node. The parent node is
+ * going to use the provided SP *priority* and WFQ *weight* to schedule its new
+ * child node.
+ *
+ * This function has to be called for both leaf and non-leaf nodes. In the case
+ * of leaf nodes (i.e. *node_id* is within the range of 0 .. (N-1), with N as
+ * the number of configured TX queues of the current port), the leaf node is
+ * configured rather than created (as the set of leaf nodes is predefined) and
+ * it is also connected as child of an existing node.
+ *
+ * The first node that is added becomes the root node and all the nodes that
+ * are subsequently added have to be added as descendants of the root node. The
+ * parent of the root node has to be specified as RTE_TM_NODE_ID_NULL and there
+ * can only be one node with this parent ID (i.e. the root node). Further
+ * restrictions for root node: needs to be non-leaf, its private shaper profile
+ * needs to be valid and single rate, cannot use any shared shapers.
+ *
+ * When called before rte_tm_hierarchy_commit() invocation, this function is
+ * typically used to define the initial start-up hierarchy for the port.
+ * Provided that dynamic hierarchy updates are supported by the current port (as
+ * advertised in the port capability set), this function can be also called
+ * after the rte_tm_hierarchy_commit() invocation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be unused by any of the existing nodes.
+ * @param[in] parent_node_id
+ *   Parent node ID. Needs to be valid.
+ * @param[in] priority
+ * Node priority. The highest node priority is zero. Used by the SP algorithm
+ * running on the parent of the current node for scheduling this child node.
+ * @param[in] weight
+ * Node weight. The node weight is relative to the weight sum of all siblings
+ * that have the same priority. The lowest weight is one. Used by the WFQ
+ * algorithm running on the parent of the current node for scheduling this
+ * child node.
+ * @param[in] level_id
+ * Level ID that should be met by this node. The hierarchy level of the
+ * current node is already fully specified through its parent node (i.e. the
+ * level of this node is equal to the level of its parent node plus one),
+ * therefore the reason for providing this parameter is to enable the
+ * application to perform step-by-step checking of the node level during
+ * successive invocations of this function. When not desired, this check can
+ * be disabled by assigning value RTE_TM_NODE_LEVEL_ID_ANY to this parameter.
+ * @param[in] params
+ * Node parameters. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_hierarchy_commit()
+ * @see RTE_TM_UPDATE_NODE_ADD_DELETE
+ * @see RTE_TM_NODE_LEVEL_ID_ANY
+ * @see struct rte_tm_capabilities
+ */
+int
+rte_tm_node_add(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node delete
+ *
+ * Delete an existing node. This operation fails when this node currently has
+ * at least one user (i.e. child node).
+ *
+ * When called before rte_tm_hierarchy_commit() invocation, this function is
+ * typically used to define the initial start-up hierarchy for the port.
+ * Provided that dynamic hierarchy updates are supported by the current port (as
+ * advertised in the port capability set), this function can be also called
+ * after the rte_tm_hierarchy_commit() invocation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_ADD_DELETE
+ */
+int
+rte_tm_node_delete(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node suspend
+ *
+ * Suspend an existing node. While the node is in suspended state, no packet is
+ * scheduled from this node and its descendants. The node exits the suspended
+ * state through the node resume operation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_node_resume()
+ * @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
+ */
+int
+rte_tm_node_suspend(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node resume
+ *
+ * Resume an existing node that is currently in suspended state. The node
+ * entered the suspended state as result of a previous node suspend operation.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_node_suspend()
+ * @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
+ */
+int
+rte_tm_node_resume(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager hierarchy commit
+ *
+ * This function is called during the port initialization phase (before the
+ * Ethernet port is started) to freeze the start-up hierarchy.
+ *
+ * This function typically performs the following steps:
+ * a) It validates the start-up hierarchy that was previously defined for the
+ * current port through successive rte_tm_node_add() invocations;
+ * b) Assuming successful validation, it performs all the necessary port
+ * specific configuration operations to install the specified hierarchy on
+ * the current port, with immediate effect once the port is started.
+ *
+ * This function fails when the currently configured hierarchy is not supported
+ * by the Ethernet port, in which case the user can abort or try out another
+ * hierarchy configuration (e.g. a hierarchy with less leaf nodes), which can be
+ * built from scratch (when *clear_on_fail* is enabled) or by modifying the
+ * existing hierarchy configuration (when *clear_on_fail* is disabled).
+ *
+ * Note that this function can still fail due to other causes (e.g. not enough
+ * memory available in the system, etc), even though the specified hierarchy is
+ * supported in principle by the current port.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] clear_on_fail
+ * On function call failure, hierarchy is cleared when this parameter is
+ * non-zero and preserved when this parameter is equal to zero.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see rte_tm_node_add()
+ * @see rte_tm_node_delete()
+ */
+int
+rte_tm_hierarchy_commit(uint8_t port_id,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node parent update
+ *
+ * Restriction for root node: its parent cannot be changed.
+ *
+ * This function can only be called after the rte_tm_hierarchy_commit()
+ * invocation. Its success depends on the port support for this operation, as
+ * advertised through the port capability set.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] parent_node_id
+ * Node ID for the new parent. Needs to be valid.
+ * @param[in] priority
+ * Node priority. The highest node priority is zero. Used by the SP algorithm
+ * running on the parent of the current node for scheduling this child node.
+ * @param[in] weight
+ * Node weight. The node weight is relative to the weight sum of all siblings
+ *   that have the same priority. The lowest weight is one. Used by the WFQ
+ * algorithm running on the parent of the current node for scheduling this
+ * child node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL
+ * @see RTE_TM_UPDATE_NODE_PARENT_CHANGE_LEVEL
+ */
+int
+rte_tm_node_parent_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node private shaper update
+ *
+ * Restriction for the root node: its private shaper profile needs to be valid
+ * and single rate.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] shaper_profile_id
+ * Shaper profile ID for the private shaper of the current node. Needs to be
+ * either valid shaper profile ID or RTE_TM_SHAPER_PROFILE_ID_NONE, with
+ * the latter disabling the private shaper of the current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_private_n_max
+ */
+int
+rte_tm_node_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node shared shapers update
+ *
+ * Restriction for root node: cannot use any shared rate shapers.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] shared_shaper_id
+ * Shared shaper ID. Needs to be valid.
+ * @param[in] add
+ * Set to non-zero value to add this shared shaper to current node or to zero
+ * to delete this shared shaper from current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::shaper_shared_n_max
+ */
+int
+rte_tm_node_shared_shaper_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_shaper_id,
+ int add,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node enabled statistics counters update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[in] stats_mask
+ * Mask of statistics counter types to be enabled for the current node. This
+ * needs to be a subset of the statistics counter types available for the
+ * current node. Any statistics counter type not included in this set is to
+ * be disabled for the current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see enum rte_tm_stats_type
+ * @see RTE_TM_UPDATE_NODE_STATS
+ */
+int
+rte_tm_node_stats_update(uint8_t port_id,
+ uint32_t node_id,
+ uint64_t stats_mask,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node WFQ weight mode update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ *   Node ID. Needs to be valid non-leaf node ID.
+ * @param[in] wfq_weight_mode
+ * WFQ weight mode for each SP priority. When NULL, it indicates that WFQ is
+ * to be used for all priorities. When non-NULL, it points to a pre-allocated
+ * array of *n_sp_priorities* values, with non-zero value for byte-mode and
+ * zero for packet-mode.
+ * @param[in] n_sp_priorities
+ * Number of SP priorities.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_WFQ_WEIGHT_MODE
+ * @see RTE_TM_UPDATE_NODE_N_SP_PRIORITIES
+ */
+int
+rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+ uint32_t node_id,
+ int *wfq_weight_mode,
+ uint32_t n_sp_priorities,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node congestion management mode update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid leaf node ID.
+ * @param[in] cman
+ * Congestion management mode.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see RTE_TM_UPDATE_NODE_CMAN
+ */
+int
+rte_tm_node_cman_update(uint8_t port_id,
+ uint32_t node_id,
+ enum rte_tm_cman_mode cman,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node private WRED context update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid leaf node ID.
+ * @param[in] wred_profile_id
+ * WRED profile ID for the private WRED context of the current node. Needs to
+ * be either valid WRED profile ID or RTE_TM_WRED_PROFILE_ID_NONE, with the
+ * latter disabling the private WRED context of the current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_private_n_max
+ */
+int
+rte_tm_node_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node shared WRED context update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid leaf node ID.
+ * @param[in] shared_wred_context_id
+ * Shared WRED context ID. Needs to be valid.
+ * @param[in] add
+ * Set to non-zero value to add this shared WRED context to current node or
+ * to zero to delete this shared WRED context from current node.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
+ */
+int
+rte_tm_node_shared_wred_context_update(uint8_t port_id,
+ uint32_t node_id,
+ uint32_t shared_wred_context_id,
+ int add,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager node statistics counters read
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] node_id
+ * Node ID. Needs to be valid.
+ * @param[out] stats
+ * When non-NULL, it contains the current value for the statistics counters
+ * enabled for the current node.
+ * @param[out] stats_mask
+ * When non-NULL, it contains the mask of statistics counter types that are
+ * currently enabled for this node, indicating which of the counters
+ * retrieved with the *stats* structure are valid.
+ * @param[in] clear
+ * When this parameter has a non-zero value, the statistics counters are
+ * cleared (i.e. set to zero) immediately after they have been read,
+ * otherwise the statistics counters are left untouched.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see enum rte_tm_stats_type
+ */
+int
+rte_tm_node_stats_read(uint8_t port_id,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager packet marking - VLAN DEI (IEEE 802.1Q)
+ *
+ * IEEE 802.1p maps the traffic class to the VLAN Priority Code Point (PCP)
+ * field (3 bits), while IEEE 802.1q maps the drop priority to the VLAN Drop
+ * Eligible Indicator (DEI) field (1 bit), which was previously named Canonical
+ * Format Indicator (CFI).
+ *
+ * All VLAN frames of a given color get their DEI bit set if marking is enabled
+ * for this color; otherwise, their DEI bit is left as is (either set or not).
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mark_green
+ * Set to non-zero value to enable marking of green packets and to zero to
+ * disable it.
+ * @param[in] mark_yellow
+ * Set to non-zero value to enable marking of yellow packets and to zero to
+ * disable it.
+ * @param[in] mark_red
+ * Set to non-zero value to enable marking of red packets and to zero to
+ * disable it.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::mark_vlan_dei_supported
+ */
+int
+rte_tm_mark_vlan_dei(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager packet marking - IPv4 / IPv6 ECN (IETF RFC 3168)
+ *
+ * IETF RFCs 2474 and 3168 reorganize the IPv4 Type of Service (TOS) field
+ * (8 bits) and the IPv6 Traffic Class (TC) field (8 bits) into Differentiated
+ * Services Codepoint (DSCP) field (6 bits) and Explicit Congestion
+ * Notification (ECN) field (2 bits). The DSCP field is typically used to
+ * encode the traffic class and/or drop priority (RFC 2597), while the ECN
+ * field is used by RFC 3168 to implement a congestion notification mechanism
+ * to be leveraged by transport layer protocols such as TCP and SCTP that have
+ * congestion control mechanisms.
+ *
+ * When congestion is experienced, as alternative to dropping the packet,
+ * routers can change the ECN field of input packets from 2'b01 or 2'b10
+ * (values indicating that source endpoint is ECN-capable) to 2'b11 (meaning
+ * that congestion is experienced). The destination endpoint can use the
+ * ECN-Echo (ECE) TCP flag to relay the congestion indication back to the
+ * source endpoint, which acknowledges it back to the destination endpoint with
+ * the Congestion Window Reduced (CWR) TCP flag.
+ *
+ * All IPv4/IPv6 packets of a given color with ECN set to 2'b01 or 2'b10
+ * carrying TCP or SCTP have their ECN set to 2'b11 if the marking feature is
+ * enabled for the current color, otherwise the ECN field is left as is.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mark_green
+ * Set to non-zero value to enable marking of green packets and to zero to
+ * disable it.
+ * @param[in] mark_yellow
+ * Set to non-zero value to enable marking of yellow packets and to zero to
+ * disable it.
+ * @param[in] mark_red
+ * Set to non-zero value to enable marking of red packets and to zero to
+ * disable it.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::mark_ip_ecn_tcp_supported
+ * @see struct rte_tm_capabilities::mark_ip_ecn_sctp_supported
+ */
+int
+rte_tm_mark_ip_ecn(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/**
+ * Traffic manager packet marking - IPv4 / IPv6 DSCP (IETF RFC 2597)
+ *
+ * IETF RFC 2597 maps the traffic class and the drop priority to the IPv4/IPv6
+ * Differentiated Services Codepoint (DSCP) field (6 bits). Here are the DSCP
+ * values proposed by this RFC:
+ *
+ * <pre> Class 1 Class 2 Class 3 Class 4 </pre>
+ * <pre> +----------+----------+----------+----------+</pre>
+ * <pre>Low Drop Prec | 001010 | 010010 | 011010 | 100010 |</pre>
+ * <pre>Medium Drop Prec | 001100 | 010100 | 011100 | 100100 |</pre>
+ * <pre>High Drop Prec | 001110 | 010110 | 011110 | 100110 |</pre>
+ * <pre> +----------+----------+----------+----------+</pre>
+ *
+ * There are 4 traffic classes (classes 1 .. 4) encoded by DSCP bits 1 and 2,
+ * as well as 3 drop priorities (low/medium/high) encoded by DSCP bits 3 and 4.
+ *
+ * All IPv4/IPv6 packets have their color marked into DSCP bits 3 and 4 as
+ * follows: green mapped to Low Drop Precedence (2'b01), yellow to Medium
+ * (2'b10) and red to High (2'b11). Marking needs to be explicitly enabled
+ * for each color; when not enabled for a given color, the DSCP field of all
+ * packets with that color is left as is.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mark_green
+ * Set to non-zero value to enable marking of green packets and to zero to
+ * disable it.
+ * @param[in] mark_yellow
+ * Set to non-zero value to enable marking of yellow packets and to zero to
+ * disable it.
+ * @param[in] mark_red
+ * Set to non-zero value to enable marking of red packets and to zero to
+ * disable it.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see struct rte_tm_capabilities::mark_ip_dscp_supported
+ */
+int
+rte_tm_mark_ip_dscp(uint8_t port_id,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_TM_H__ */
diff --git a/lib/librte_ether/rte_tm_driver.h b/lib/librte_ether/rte_tm_driver.h
new file mode 100644
index 00000000..a5b698fe
--- /dev/null
+++ b/lib/librte_ether/rte_tm_driver.h
@@ -0,0 +1,366 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TM_DRIVER_H__
+#define __INCLUDE_RTE_TM_DRIVER_H__
+
+/**
+ * @file
+ * RTE Generic Traffic Manager API (Driver Side)
+ *
+ * This file provides implementation helpers for internal use by PMDs; they
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_tm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @internal Traffic manager node ID validate and type get */
+typedef int (*rte_tm_node_type_get_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager capabilities get */
+typedef int (*rte_tm_capabilities_get_t)(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager level capabilities get */
+typedef int (*rte_tm_level_capabilities_get_t)(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node capabilities get */
+typedef int (*rte_tm_node_capabilities_get_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager WRED profile add */
+typedef int (*rte_tm_wred_profile_add_t)(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager WRED profile delete */
+typedef int (*rte_tm_wred_profile_delete_t)(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared WRED context add */
+typedef int (*rte_tm_shared_wred_context_add_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t shared_wred_context_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared WRED context delete */
+typedef int (*rte_tm_shared_wred_context_delete_t)(
+ struct rte_eth_dev *dev,
+ uint32_t shared_wred_context_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shaper profile add */
+typedef int (*rte_tm_shaper_profile_add_t)(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shaper profile delete */
+typedef int (*rte_tm_shaper_profile_delete_t)(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared shaper add/update */
+typedef int (*rte_tm_shared_shaper_add_update_t)(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager shared shaper delete */
+typedef int (*rte_tm_shared_shaper_delete_t)(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node add */
+typedef int (*rte_tm_node_add_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node delete */
+typedef int (*rte_tm_node_delete_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node suspend */
+typedef int (*rte_tm_node_suspend_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node resume */
+typedef int (*rte_tm_node_resume_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager hierarchy commit */
+typedef int (*rte_tm_hierarchy_commit_t)(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node parent update */
+typedef int (*rte_tm_node_parent_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node shaper update */
+typedef int (*rte_tm_node_shaper_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node shaper update */
+typedef int (*rte_tm_node_shared_shaper_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shared_shaper_id,
+ int32_t add,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node stats update */
+typedef int (*rte_tm_node_stats_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint64_t stats_mask,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node WFQ weight mode update */
+typedef int (*rte_tm_node_wfq_weight_mode_update_t)(
+	struct rte_eth_dev *dev,
+	uint32_t node_id,
+	int *wfq_weight_mode,
+	uint32_t n_sp_priorities,
+	struct rte_tm_error *error);
+
+/** @internal Traffic manager node congestion management mode update */
+typedef int (*rte_tm_node_cman_update_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ enum rte_tm_cman_mode cman,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node WRED context update */
+typedef int (*rte_tm_node_wred_context_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager node WRED context update */
+typedef int (*rte_tm_node_shared_wred_context_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shared_wred_context_id,
+ int add,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager read stats counters for specific node */
+typedef int (*rte_tm_node_stats_read_t)(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager packet marking - VLAN DEI */
+typedef int (*rte_tm_mark_vlan_dei_t)(struct rte_eth_dev *dev,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager packet marking - IPv4/IPv6 ECN */
+typedef int (*rte_tm_mark_ip_ecn_t)(struct rte_eth_dev *dev,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+/** @internal Traffic manager packet marking - IPv4/IPv6 DSCP */
+typedef int (*rte_tm_mark_ip_dscp_t)(struct rte_eth_dev *dev,
+ int mark_green,
+ int mark_yellow,
+ int mark_red,
+ struct rte_tm_error *error);
+
+struct rte_tm_ops {
+ /** Traffic manager node type get */
+ rte_tm_node_type_get_t node_type_get;
+
+ /** Traffic manager capabilities_get */
+ rte_tm_capabilities_get_t capabilities_get;
+ /** Traffic manager level capabilities_get */
+ rte_tm_level_capabilities_get_t level_capabilities_get;
+ /** Traffic manager node capabilities get */
+ rte_tm_node_capabilities_get_t node_capabilities_get;
+
+ /** Traffic manager WRED profile add */
+ rte_tm_wred_profile_add_t wred_profile_add;
+ /** Traffic manager WRED profile delete */
+ rte_tm_wred_profile_delete_t wred_profile_delete;
+ /** Traffic manager shared WRED context add/update */
+ rte_tm_shared_wred_context_add_update_t
+ shared_wred_context_add_update;
+ /** Traffic manager shared WRED context delete */
+ rte_tm_shared_wred_context_delete_t
+ shared_wred_context_delete;
+
+ /** Traffic manager shaper profile add */
+ rte_tm_shaper_profile_add_t shaper_profile_add;
+ /** Traffic manager shaper profile delete */
+ rte_tm_shaper_profile_delete_t shaper_profile_delete;
+ /** Traffic manager shared shaper add/update */
+ rte_tm_shared_shaper_add_update_t shared_shaper_add_update;
+ /** Traffic manager shared shaper delete */
+ rte_tm_shared_shaper_delete_t shared_shaper_delete;
+
+ /** Traffic manager node add */
+ rte_tm_node_add_t node_add;
+ /** Traffic manager node delete */
+ rte_tm_node_delete_t node_delete;
+ /** Traffic manager node suspend */
+ rte_tm_node_suspend_t node_suspend;
+ /** Traffic manager node resume */
+ rte_tm_node_resume_t node_resume;
+ /** Traffic manager hierarchy commit */
+ rte_tm_hierarchy_commit_t hierarchy_commit;
+
+ /** Traffic manager node parent update */
+ rte_tm_node_parent_update_t node_parent_update;
+ /** Traffic manager node shaper update */
+ rte_tm_node_shaper_update_t node_shaper_update;
+ /** Traffic manager node shared shaper update */
+ rte_tm_node_shared_shaper_update_t node_shared_shaper_update;
+ /** Traffic manager node stats update */
+ rte_tm_node_stats_update_t node_stats_update;
+ /** Traffic manager node WFQ weight mode update */
+ rte_tm_node_wfq_weight_mode_update_t node_wfq_weight_mode_update;
+ /** Traffic manager node congestion management mode update */
+ rte_tm_node_cman_update_t node_cman_update;
+ /** Traffic manager node WRED context update */
+ rte_tm_node_wred_context_update_t node_wred_context_update;
+ /** Traffic manager node shared WRED context update */
+ rte_tm_node_shared_wred_context_update_t
+ node_shared_wred_context_update;
+ /** Traffic manager read statistics counters for current node */
+ rte_tm_node_stats_read_t node_stats_read;
+
+ /** Traffic manager packet marking - VLAN DEI */
+ rte_tm_mark_vlan_dei_t mark_vlan_dei;
+ /** Traffic manager packet marking - IPv4/IPv6 ECN */
+ rte_tm_mark_ip_ecn_t mark_ip_ecn;
+ /** Traffic manager packet marking - IPv4/IPv6 DSCP */
+ rte_tm_mark_ip_dscp_t mark_ip_dscp;
+};
+
+/**
+ * Initialize generic error structure.
+ *
+ * This function also sets rte_errno to a given value.
+ *
+ * @param[out] error
+ * Pointer to error structure (may be NULL).
+ * @param[in] code
+ * Related error code (rte_errno).
+ * @param[in] type
+ * Cause field and error type.
+ * @param[in] cause
+ * Object responsible for the error.
+ * @param[in] message
+ * Human-readable error message.
+ *
+ * @return
+ * Error code.
+ */
+static inline int
+rte_tm_error_set(struct rte_tm_error *error,
+ int code,
+ enum rte_tm_error_type type,
+ const void *cause,
+ const char *message)
+{
+ if (error) {
+ *error = (struct rte_tm_error){
+ .type = type,
+ .cause = cause,
+ .message = message,
+ };
+ }
+ rte_errno = code;
+ return code;
+}
+
+/**
+ * Get generic traffic manager operations structure from a port
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] error
+ * Error details
+ *
+ * @return
+ * The traffic manager operations structure associated with port_id on
+ * success, NULL otherwise.
+ */
+const struct rte_tm_ops *
+rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_TM_DRIVER_H__ */