Diffstat (limited to 'drivers/net/softnic')
-rw-r--r--  drivers/net/softnic/Makefile                            60
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.c                  851
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.h                   83
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_internals.h        291
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_tm.c              3452
-rw-r--r--  drivers/net/softnic/rte_pmd_eth_softnic_version.map      7
6 files changed, 4744 insertions, 0 deletions
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
new file mode 100644
index 00000000..09ed62ea
--- /dev/null
+++ b/drivers/net/softnic/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_softnic.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_eth_softnic_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_softnic.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
new file mode 100644
index 00000000..3e47c2f9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -0,0 +1,851 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_sched.h>
+#include <rte_tm_driver.h>
+
+#include "rte_eth_softnic.h"
+#include "rte_eth_softnic_internals.h"
+
+#define DEV_HARD(p) \
+ (&rte_eth_devices[p->hard.port_id])
+
+#define PMD_PARAM_SOFT_TM "soft_tm"
+#define PMD_PARAM_SOFT_TM_RATE "soft_tm_rate"
+#define PMD_PARAM_SOFT_TM_NB_QUEUES "soft_tm_nb_queues"
+#define PMD_PARAM_SOFT_TM_QSIZE0 "soft_tm_qsize0"
+#define PMD_PARAM_SOFT_TM_QSIZE1 "soft_tm_qsize1"
+#define PMD_PARAM_SOFT_TM_QSIZE2 "soft_tm_qsize2"
+#define PMD_PARAM_SOFT_TM_QSIZE3 "soft_tm_qsize3"
+#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz"
+#define PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz"
+
+#define PMD_PARAM_HARD_NAME "hard_name"
+#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id"
+
+static const char *pmd_valid_args[] = {
+ PMD_PARAM_SOFT_TM,
+ PMD_PARAM_SOFT_TM_RATE,
+ PMD_PARAM_SOFT_TM_NB_QUEUES,
+ PMD_PARAM_SOFT_TM_QSIZE0,
+ PMD_PARAM_SOFT_TM_QSIZE1,
+ PMD_PARAM_SOFT_TM_QSIZE2,
+ PMD_PARAM_SOFT_TM_QSIZE3,
+ PMD_PARAM_SOFT_TM_ENQ_BSZ,
+ PMD_PARAM_SOFT_TM_DEQ_BSZ,
+ PMD_PARAM_HARD_NAME,
+ PMD_PARAM_HARD_TX_QUEUE_ID,
+ NULL
+};
+
+static const struct rte_eth_dev_info pmd_dev_info = {
+ .min_rx_bufsize = 0,
+ .max_rx_pktlen = UINT32_MAX,
+ .max_rx_queues = UINT16_MAX,
+ .max_tx_queues = UINT16_MAX,
+ .rx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ },
+ .tx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ },
+};
+
+static void
+pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
+}
+
+static int
+pmd_dev_configure(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_eth_dev *hard_dev = DEV_HARD(p);
+
+ if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
+ return -1;
+
+ if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
+ return -1;
+
+ return 0;
+}
+
+static int
+pmd_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool __rte_unused)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ if (p->params.soft.intrusive == 0) {
+ struct pmd_rx_queue *rxq;
+
+ rxq = rte_zmalloc_socket(p->params.soft.name,
+ sizeof(struct pmd_rx_queue), 0, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+
+ rxq->hard.port_id = p->hard.port_id;
+ rxq->hard.rx_queue_id = rx_queue_id;
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ } else {
+ struct rte_eth_dev *hard_dev = DEV_HARD(p);
+ void *rxq = hard_dev->data->rx_queues[rx_queue_id];
+
+ if (rxq == NULL)
+ return -1;
+
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ }
+ return 0;
+}
+
+static int
+pmd_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
+ char name[size];
+ struct rte_ring *r;
+
+ snprintf(name, sizeof(name), "%s_txq%04x",
+ dev->data->name, tx_queue_id);
+ r = rte_ring_create(name, nb_tx_desc, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (r == NULL)
+ return -1;
+
+ dev->data->tx_queues[tx_queue_id] = r;
+ return 0;
+}
+
+static int
+pmd_dev_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ if (tm_used(dev)) {
+ int status = tm_start(p);
+
+ if (status)
+ return status;
+ }
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ if (p->params.soft.intrusive) {
+ struct rte_eth_dev *hard_dev = DEV_HARD(p);
+
+ /* The hard_dev->rx_pkt_burst should be stable by now */
+ dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
+ }
+
+ return 0;
+}
+
+static void
+pmd_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ if (tm_used(dev))
+ tm_stop(p);
+}
+
+static void
+pmd_dev_close(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+
+ /* TX queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
+}
+
+static int
+pmd_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
+{
+ *(const struct rte_tm_ops **)arg =
+ (tm_enabled(dev)) ? &pmd_tm_ops : NULL;
+
+ return 0;
+}
+
+static const struct eth_dev_ops pmd_ops = {
+ .dev_configure = pmd_dev_configure,
+ .dev_start = pmd_dev_start,
+ .dev_stop = pmd_dev_stop,
+ .dev_close = pmd_dev_close,
+ .link_update = pmd_link_update,
+ .dev_infos_get = pmd_dev_infos_get,
+ .rx_queue_setup = pmd_rx_queue_setup,
+ .tx_queue_setup = pmd_tx_queue_setup,
+ .tm_ops_get = pmd_tm_ops_get,
+};
+
+static uint16_t
+pmd_rx_pkt_burst(void *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct pmd_rx_queue *rx_queue = rxq;
+
+ return rte_eth_rx_burst(rx_queue->hard.port_id,
+ rx_queue->hard.rx_queue_id,
+ rx_pkts,
+ nb_pkts);
+}
+
+static uint16_t
+pmd_tx_pkt_burst(void *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ return (uint16_t)rte_ring_enqueue_burst(txq,
+ (void **)tx_pkts,
+ nb_pkts,
+ NULL);
+}
+
+static __rte_always_inline int
+run_default(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* Persistent context: Read Only (update not required) */
+ struct rte_mbuf **pkts = p->soft.def.pkts;
+ uint16_t nb_tx_queues = dev->data->nb_tx_queues;
+
+ /* Persistent context: Read - Write (update required) */
+ uint32_t txq_pos = p->soft.def.txq_pos;
+ uint32_t pkts_len = p->soft.def.pkts_len;
+ uint32_t flush_count = p->soft.def.flush_count;
+
+ /* Not part of the persistent context */
+ uint32_t pos;
+ uint16_t i;
+
+ /* Soft device TXQ read, Hard device TXQ write */
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct rte_ring *txq = dev->data->tx_queues[txq_pos];
+
+ /* Read soft device TXQ burst to packet enqueue buffer */
+ pkts_len += rte_ring_sc_dequeue_burst(txq,
+ (void **)&pkts[pkts_len],
+ DEFAULT_BURST_SIZE,
+ NULL);
+
+ /* Increment soft device TXQ */
+ txq_pos++;
+ if (txq_pos >= nb_tx_queues)
+ txq_pos = 0;
+
+ /* Hard device TXQ write when complete burst is available */
+ if (pkts_len >= DEFAULT_BURST_SIZE) {
+ for (pos = 0; pos < pkts_len; )
+ pos += rte_eth_tx_burst(p->hard.port_id,
+ p->params.hard.tx_queue_id,
+ &pkts[pos],
+ (uint16_t)(pkts_len - pos));
+
+ pkts_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+
+ if (flush_count >= FLUSH_COUNT_THRESHOLD) {
+ for (pos = 0; pos < pkts_len; )
+ pos += rte_eth_tx_burst(p->hard.port_id,
+ p->params.hard.tx_queue_id,
+ &pkts[pos],
+ (uint16_t)(pkts_len - pos));
+
+ pkts_len = 0;
+ flush_count = 0;
+ }
+
+ p->soft.def.txq_pos = txq_pos;
+ p->soft.def.pkts_len = pkts_len;
+ p->soft.def.flush_count = flush_count + 1;
+
+ return 0;
+}
+
+static __rte_always_inline int
+run_tm(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* Persistent context: Read Only (update not required) */
+ struct rte_sched_port *sched = p->soft.tm.sched;
+ struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
+ struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
+ uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
+ uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
+ uint16_t nb_tx_queues = dev->data->nb_tx_queues;
+
+ /* Persistent context: Read - Write (update required) */
+ uint32_t txq_pos = p->soft.tm.txq_pos;
+ uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
+ uint32_t flush_count = p->soft.tm.flush_count;
+
+ /* Not part of the persistent context */
+ uint32_t pkts_deq_len, pos;
+ uint16_t i;
+
+ /* Soft device TXQ read, TM enqueue */
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct rte_ring *txq = dev->data->tx_queues[txq_pos];
+
+ /* Read TXQ burst to packet enqueue buffer */
+ pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
+ (void **)&pkts_enq[pkts_enq_len],
+ enq_bsz,
+ NULL);
+
+ /* Increment TXQ */
+ txq_pos++;
+ if (txq_pos >= nb_tx_queues)
+ txq_pos = 0;
+
+ /* TM enqueue when complete burst is available */
+ if (pkts_enq_len >= enq_bsz) {
+ rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+
+ if (flush_count >= FLUSH_COUNT_THRESHOLD) {
+ if (pkts_enq_len)
+ rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+
+ p->soft.tm.txq_pos = txq_pos;
+ p->soft.tm.pkts_enq_len = pkts_enq_len;
+ p->soft.tm.flush_count = flush_count + 1;
+
+ /* TM dequeue, Hard device TXQ write */
+ pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);
+
+ for (pos = 0; pos < pkts_deq_len; )
+ pos += rte_eth_tx_burst(p->hard.port_id,
+ p->params.hard.tx_queue_id,
+ &pkts_deq[pos],
+ (uint16_t)(pkts_deq_len - pos));
+
+ return 0;
+}
+
+int
+rte_pmd_softnic_run(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
+}
+
+static struct ether_addr eth_addr = { .addr_bytes = {0} };
+
+static uint32_t
+eth_dev_speed_max_mbps(uint32_t speed_capa)
+{
+ uint32_t rate_mbps[32] = {
+ ETH_SPEED_NUM_NONE,
+ ETH_SPEED_NUM_10M,
+ ETH_SPEED_NUM_10M,
+ ETH_SPEED_NUM_100M,
+ ETH_SPEED_NUM_100M,
+ ETH_SPEED_NUM_1G,
+ ETH_SPEED_NUM_2_5G,
+ ETH_SPEED_NUM_5G,
+ ETH_SPEED_NUM_10G,
+ ETH_SPEED_NUM_20G,
+ ETH_SPEED_NUM_25G,
+ ETH_SPEED_NUM_40G,
+ ETH_SPEED_NUM_50G,
+ ETH_SPEED_NUM_56G,
+ ETH_SPEED_NUM_100G,
+ };
+
+ uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
+ return rate_mbps[pos];
+}
+
+static int
+default_init(struct pmd_internals *p,
+ struct pmd_params *params,
+ int numa_node)
+{
+ p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
+ 2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.def.pkts == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+default_free(struct pmd_internals *p)
+{
+ rte_free(p->soft.def.pkts);
+}
+
+static void *
+pmd_init(struct pmd_params *params, int numa_node)
+{
+ struct pmd_internals *p;
+ int status;
+
+ p = rte_zmalloc_socket(params->soft.name,
+ sizeof(struct pmd_internals),
+ 0,
+ numa_node);
+ if (p == NULL)
+ return NULL;
+
+ memcpy(&p->params, params, sizeof(p->params));
+ rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);
+
+ /* Default */
+ status = default_init(p, params, numa_node);
+ if (status) {
+ free(p->params.hard.name);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Traffic Management (TM) */
+ if (params->soft.flags & PMD_FEATURE_TM) {
+ status = tm_init(p, params, numa_node);
+ if (status) {
+ default_free(p);
+ free(p->params.hard.name);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ return p;
+}
+
+static void
+pmd_free(struct pmd_internals *p)
+{
+ if (p->params.soft.flags & PMD_FEATURE_TM)
+ tm_free(p);
+
+ default_free(p);
+
+ free(p->params.hard.name);
+ rte_free(p);
+}
+
+static int
+pmd_ethdev_register(struct rte_vdev_device *vdev,
+ struct pmd_params *params,
+ void *dev_private)
+{
+ struct rte_eth_dev_info hard_info;
+ struct rte_eth_dev *soft_dev;
+ uint32_t hard_speed;
+ int numa_node;
+ uint16_t hard_port_id;
+
+ rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
+ rte_eth_dev_info_get(hard_port_id, &hard_info);
+ hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
+ numa_node = rte_eth_dev_socket_id(hard_port_id);
+
+ /* Ethdev entry allocation */
+ soft_dev = rte_eth_dev_allocate(params->soft.name);
+ if (!soft_dev)
+ return -ENOMEM;
+
+ /* dev */
+ soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
+ NULL : /* set up later */
+ pmd_rx_pkt_burst;
+ soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
+ soft_dev->tx_pkt_prepare = NULL;
+ soft_dev->dev_ops = &pmd_ops;
+ soft_dev->device = &vdev->device;
+
+ /* dev->data */
+ soft_dev->data->dev_private = dev_private;
+ soft_dev->data->dev_link.link_speed = hard_speed;
+ soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ soft_dev->data->dev_link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ soft_dev->data->mac_addrs = &eth_addr;
+ soft_dev->data->promiscuous = 1;
+ soft_dev->data->kdrv = RTE_KDRV_NONE;
+ soft_dev->data->numa_node = numa_node;
+
+ return 0;
+}
+
+static int
+get_string(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint32_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static int
+pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ int i, ret;
+
+ kvlist = rte_kvargs_parse(params, pmd_valid_args);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ /* Set default values */
+ memset(p, 0, sizeof(*p));
+ p->soft.name = name;
+ p->soft.intrusive = INTRUSIVE;
+ p->soft.tm.rate = 0;
+ p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
+ p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
+ p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
+ p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
+
+ /* SOFT: TM (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
+ char *s;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
+ &get_string, &s);
+ if (ret < 0)
+ goto out_free;
+
+ if (strcmp(s, "on") == 0)
+ p->soft.flags |= PMD_FEATURE_TM;
+ else if (strcmp(s, "off") == 0)
+ p->soft.flags &= ~PMD_FEATURE_TM;
+ else
+ ret = -EINVAL;
+
+ free(s);
+ if (ret)
+ goto out_free;
+ }
+
+ /* SOFT: TM rate (measured in bytes/second) (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
+ &get_uint32, &p->soft.tm.rate);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM number of queues (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
+ &get_uint32, &p->soft.tm.nb_queues);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM queue size 0 .. 3 (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[0] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[1] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[2] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[3] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM enqueue burst size (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
+ &get_uint32, &p->soft.tm.enq_bsz);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM dequeue burst size (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
+ &get_uint32, &p->soft.tm.deq_bsz);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* HARD: name (mandatory) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
+ &get_string, &p->hard.name);
+ if (ret < 0)
+ goto out_free;
+ } else {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /* HARD: tx_queue_id (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
+ &get_uint32, &p->hard.tx_queue_id);
+ if (ret < 0)
+ goto out_free;
+ }
+
+out_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+pmd_probe(struct rte_vdev_device *vdev)
+{
+ struct pmd_params p;
+ const char *params;
+ int status;
+
+ struct rte_eth_dev_info hard_info;
+ uint32_t hard_speed;
+ uint16_t hard_port_id;
+ int numa_node;
+ void *dev_private;
+
+ RTE_LOG(INFO, PMD,
+ "Probing device \"%s\"\n",
+ rte_vdev_device_name(vdev));
+
+ /* Parse input arguments */
+ params = rte_vdev_device_args(vdev);
+ if (!params)
+ return -EINVAL;
+
+ status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
+ if (status)
+ return status;
+
+ /* Check input arguments */
+ if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
+ return -EINVAL;
+
+ rte_eth_dev_info_get(hard_port_id, &hard_info);
+ hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
+ numa_node = rte_eth_dev_socket_id(hard_port_id);
+
+ if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
+ return -EINVAL;
+
+ if (p.soft.flags & PMD_FEATURE_TM) {
+ status = tm_params_check(&p, hard_speed);
+
+ if (status)
+ return status;
+ }
+
+ /* Allocate and initialize soft ethdev private data */
+ dev_private = pmd_init(&p, numa_node);
+ if (dev_private == NULL)
+ return -ENOMEM;
+
+ /* Register soft ethdev */
+ RTE_LOG(INFO, PMD,
+ "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
+ p.soft.name, p.hard.name);
+
+ status = pmd_ethdev_register(vdev, &p, dev_private);
+ if (status) {
+ pmd_free(dev_private);
+ return status;
+ }
+
+ return 0;
+}
+
+static int
+pmd_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *dev = NULL;
+ struct pmd_internals *p;
+
+ if (!vdev)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
+ rte_vdev_device_name(vdev));
+
+ /* Find the ethdev entry */
+ dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
+ if (dev == NULL)
+ return -ENODEV;
+ p = dev->data->dev_private;
+
+ /* Free device data structures*/
+ pmd_free(p);
+ rte_free(dev->data);
+ rte_eth_dev_release_port(dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_softnic_drv = {
+ .probe = pmd_probe,
+ .remove = pmd_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
+ PMD_PARAM_SOFT_TM "=on|off "
+ PMD_PARAM_SOFT_TM_RATE "=<int> "
+ PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
+ PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
+ PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
+ PMD_PARAM_HARD_NAME "=<string> "
+ PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
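[Editor's note] As a usage sketch for the parameter string registered above: the soft device is created over an existing (hard) ethdev through vdev arguments, where only hard_name is mandatory and the soft_tm* keys enable and size the soft traffic manager. The helper below is hypothetical and assumes the DPDK 17.11-era rte_vdev_init() API; the argument keys mirror the PMD_PARAM_* strings defined in this file.

#include <stdio.h>
#include <rte_bus_vdev.h>

/* Hypothetical helper: create a softnic port on top of an existing hard
 * port. Note that deq_bsz must stay smaller than enq_bsz, as enforced by
 * tm_params_check() in rte_eth_softnic_tm.c.
 */
static int
softnic_port_create(const char *hard_name)
{
	char args[256];

	snprintf(args, sizeof(args),
		"hard_name=%s,hard_tx_queue_id=0,"
		"soft_tm=on,soft_tm_nb_queues=65536,"
		"soft_tm_enq_bsz=32,soft_tm_deq_bsz=24",
		hard_name);

	return rte_vdev_init("net_softnic0", args);
}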
diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h
new file mode 100644
index 00000000..b49e5829
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic.h
@@ -0,0 +1,83 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_ETH_SOFTNIC_H__
+#define __INCLUDE_RTE_ETH_SOFTNIC_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_NB_QUEUES
+#define SOFTNIC_SOFT_TM_NB_QUEUES 65536
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_QUEUE_SIZE
+#define SOFTNIC_SOFT_TM_QUEUE_SIZE 64
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_ENQ_BSZ
+#define SOFTNIC_SOFT_TM_ENQ_BSZ 32
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_DEQ_BSZ
+#define SOFTNIC_SOFT_TM_DEQ_BSZ 24
+#endif
+
+#ifndef SOFTNIC_HARD_TX_QUEUE_ID
+#define SOFTNIC_HARD_TX_QUEUE_ID 0
+#endif
+
+/**
+ * Run the traffic management function on the softnic device
+ *
+ * This function reads packets from the softnic input queues, inserts them
+ * into the QoS scheduler queues based on the mbuf sched field value and
+ * transmits the scheduled packets out through the hard device interface.
+ *
+ * @param port_id
+ * Port ID of the soft device.
+ * @return
+ * Zero.
+ */
+
+int
+rte_pmd_softnic_run(uint16_t port_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_ETH_SOFTNIC_H__ */
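[Editor's note] To show how rte_pmd_softnic_run() fits into an application: TX on the soft device only enqueues mbufs into internal rings, so the run function must be called periodically from the lcore that owns the soft TX queue. A minimal polling-loop sketch, with hypothetical soft_port_id and quit flag and a configured RX/TX queue 0:

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include "rte_eth_softnic.h"

/* Hypothetical datapath loop: RX on the soft port reads from the underlying
 * hard port; TX on the soft port only enqueues into the soft TX ring;
 * rte_pmd_softnic_run() drains that ring through the QoS scheduler and
 * transmits on the hard port.
 */
static void
softnic_fwd_loop(uint16_t soft_port_id, volatile int *quit)
{
	while (!*quit) {
		struct rte_mbuf *pkts[32];
		uint16_t n_rx, n_tx;

		n_rx = rte_eth_rx_burst(soft_port_id, 0, pkts, 32);

		/* a classification stage would set the mbuf sched field here */

		n_tx = rte_eth_tx_burst(soft_port_id, 0, pkts, n_rx);
		while (n_tx < n_rx)
			rte_pktmbuf_free(pkts[n_tx++]);

		rte_pmd_softnic_run(soft_port_id);
	}
}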
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
new file mode 100644
index 00000000..1f758069
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -0,0 +1,291 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_sched.h>
+#include <rte_ethdev.h>
+#include <rte_tm_driver.h>
+
+#include "rte_eth_softnic.h"
+
+/**
+ * PMD Parameters
+ */
+
+enum pmd_feature {
+ PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
+};
+
+#ifndef INTRUSIVE
+#define INTRUSIVE 0
+#endif
+
+struct pmd_params {
+ /** Parameters for the soft device (to be created) */
+ struct {
+ const char *name; /**< Name */
+ uint32_t flags; /**< Flags */
+
+ /** 0 = Access the hard device through the API only (potentially slower,
+ * but safer);
+ * 1 = Access to the hard device private data structures is allowed
+ * (potentially faster).
+ */
+ int intrusive;
+
+ /** Traffic Management (TM) */
+ struct {
+ uint32_t rate; /**< Rate (bytes/second) */
+ uint32_t nb_queues; /**< Number of queues */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ /**< Queue size per traffic class */
+ uint32_t enq_bsz; /**< Enqueue burst size */
+ uint32_t deq_bsz; /**< Dequeue burst size */
+ } tm;
+ } soft;
+
+ /** Parameters for the hard device (existing) */
+ struct {
+ char *name; /**< Name */
+ uint16_t tx_queue_id; /**< TX queue ID */
+ } hard;
+};
+
+/**
+ * Default Internals
+ */
+
+#ifndef DEFAULT_BURST_SIZE
+#define DEFAULT_BURST_SIZE 32
+#endif
+
+#ifndef FLUSH_COUNT_THRESHOLD
+#define FLUSH_COUNT_THRESHOLD (1 << 17)
+#endif
+
+struct default_internals {
+ struct rte_mbuf **pkts;
+ uint32_t pkts_len;
+ uint32_t txq_pos;
+ uint32_t flush_count;
+};
+
+/**
+ * Traffic Management (TM) Internals
+ */
+
+#ifndef TM_MAX_SUBPORTS
+#define TM_MAX_SUBPORTS 8
+#endif
+
+#ifndef TM_MAX_PIPES_PER_SUBPORT
+#define TM_MAX_PIPES_PER_SUBPORT 4096
+#endif
+
+struct tm_params {
+ struct rte_sched_port_params port_params;
+
+ struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];
+
+ struct rte_sched_pipe_params
+ pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ uint32_t n_pipe_profiles;
+ uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
+};
+
+/* TM Levels */
+enum tm_node_level {
+ TM_NODE_LEVEL_PORT = 0,
+ TM_NODE_LEVEL_SUBPORT,
+ TM_NODE_LEVEL_PIPE,
+ TM_NODE_LEVEL_TC,
+ TM_NODE_LEVEL_QUEUE,
+ TM_NODE_LEVEL_MAX,
+};
+
+/* TM Shaper Profile */
+struct tm_shaper_profile {
+ TAILQ_ENTRY(tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t n_users;
+ struct rte_tm_shaper_params params;
+};
+
+TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);
+
+/* TM Shared Shaper */
+struct tm_shared_shaper {
+ TAILQ_ENTRY(tm_shared_shaper) node;
+ uint32_t shared_shaper_id;
+ uint32_t n_users;
+ uint32_t shaper_profile_id;
+};
+
+TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);
+
+/* TM WRED Profile */
+struct tm_wred_profile {
+ TAILQ_ENTRY(tm_wred_profile) node;
+ uint32_t wred_profile_id;
+ uint32_t n_users;
+ struct rte_tm_wred_params params;
+};
+
+TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);
+
+/* TM Node */
+struct tm_node {
+ TAILQ_ENTRY(tm_node) node;
+ uint32_t node_id;
+ uint32_t parent_node_id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t level;
+ struct tm_node *parent_node;
+ struct tm_shaper_profile *shaper_profile;
+ struct tm_wred_profile *wred_profile;
+ struct rte_tm_node_params params;
+ struct rte_tm_node_stats stats;
+ uint32_t n_children;
+};
+
+TAILQ_HEAD(tm_node_list, tm_node);
+
+/* TM Hierarchy Specification */
+struct tm_hierarchy {
+ struct tm_shaper_profile_list shaper_profiles;
+ struct tm_shared_shaper_list shared_shapers;
+ struct tm_wred_profile_list wred_profiles;
+ struct tm_node_list nodes;
+
+ uint32_t n_shaper_profiles;
+ uint32_t n_shared_shapers;
+ uint32_t n_wred_profiles;
+ uint32_t n_nodes;
+
+ uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
+};
+
+struct tm_internals {
+ /** Hierarchy specification
+ *
+ * - The hierarchy is unfrozen at init and when the port is stopped.
+ * - The hierarchy is frozen on a successful hierarchy commit.
+ * - Run-time hierarchy changes are not allowed, so it makes sense to
+ * keep the hierarchy frozen once the port is started.
+ */
+ struct tm_hierarchy h;
+ int hierarchy_frozen;
+
+ /** Blueprints */
+ struct tm_params params;
+
+ /** Run-time */
+ struct rte_sched_port *sched;
+ struct rte_mbuf **pkts_enq;
+ struct rte_mbuf **pkts_deq;
+ uint32_t pkts_enq_len;
+ uint32_t txq_pos;
+ uint32_t flush_count;
+};
+
+/**
+ * PMD Internals
+ */
+struct pmd_internals {
+ /** Params */
+ struct pmd_params params;
+
+ /** Soft device */
+ struct {
+ struct default_internals def; /**< Default */
+ struct tm_internals tm; /**< Traffic Management */
+ } soft;
+
+ /** Hard device */
+ struct {
+ uint16_t port_id;
+ } hard;
+};
+
+struct pmd_rx_queue {
+ /** Hard device */
+ struct {
+ uint16_t port_id;
+ uint16_t rx_queue_id;
+ } hard;
+};
+
+/**
+ * Traffic Management (TM) Operation
+ */
+extern const struct rte_tm_ops pmd_tm_ops;
+
+int
+tm_params_check(struct pmd_params *params, uint32_t hard_rate);
+
+int
+tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);
+
+void
+tm_free(struct pmd_internals *p);
+
+int
+tm_start(struct pmd_internals *p);
+
+void
+tm_stop(struct pmd_internals *p);
+
+static inline int
+tm_enabled(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ return (p->params.soft.flags & PMD_FEATURE_TM);
+}
+
+static inline int
+tm_used(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ return (p->params.soft.flags & PMD_FEATURE_TM) &&
+ p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
+}
+
+#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
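[Editor's note] One consequence of tm_enabled() worth noting: the generic rte_tm API is only reachable when the soft traffic manager feature flag is set, because pmd_tm_ops_get() in rte_eth_softnic.c hands back NULL otherwise. A small, hypothetical sketch of how an application could probe for this on an already-probed soft_port_id:

#include <rte_tm.h>

/* Returns 1 when the softnic port exposes the rte_tm API (soft_tm=on),
 * 0 otherwise: the rte_tm layer fails the call when the driver reports
 * no TM ops.
 */
static int
softnic_tm_available(uint16_t soft_port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;

	return rte_tm_capabilities_get(soft_port_id, &cap, &error) == 0;
}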
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
new file mode 100644
index 00000000..dbb25143
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -0,0 +1,3452 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+#define BYTES_IN_MBPS (1000 * 1000 / 8)
+#define SUBPORT_TC_PERIOD 10
+#define PIPE_TC_PERIOD 40
+
+int
+tm_params_check(struct pmd_params *params, uint32_t hard_rate)
+{
+ uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
+ uint32_t i;
+
+ /* rate */
+ if (params->soft.tm.rate) {
+ if (params->soft.tm.rate > hard_rate_bytes_per_sec)
+ return -EINVAL;
+ } else {
+ params->soft.tm.rate =
+ (hard_rate_bytes_per_sec > UINT32_MAX) ?
+ UINT32_MAX : hard_rate_bytes_per_sec;
+ }
+
+ /* nb_queues */
+ if (params->soft.tm.nb_queues == 0)
+ return -EINVAL;
+
+ if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
+ params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
+
+ params->soft.tm.nb_queues =
+ rte_align32pow2(params->soft.tm.nb_queues);
+
+ /* qsize */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ if (params->soft.tm.qsize[i] == 0)
+ return -EINVAL;
+
+ params->soft.tm.qsize[i] =
+ rte_align32pow2(params->soft.tm.qsize[i]);
+ }
+
+ /* enq_bsz, deq_bsz */
+ if (params->soft.tm.enq_bsz == 0 ||
+ params->soft.tm.deq_bsz == 0 ||
+ params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+tm_hierarchy_init(struct pmd_internals *p)
+{
+ memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+
+ /* Initialize shaper profile list */
+ TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
+
+ /* Initialize shared shaper list */
+ TAILQ_INIT(&p->soft.tm.h.shared_shapers);
+
+ /* Initialize wred profile list */
+ TAILQ_INIT(&p->soft.tm.h.wred_profiles);
+
+ /* Initialize TM node list */
+ TAILQ_INIT(&p->soft.tm.h.nodes);
+}
+
+static void
+tm_hierarchy_uninit(struct pmd_internals *p)
+{
+ /* Remove all nodes*/
+ for ( ; ; ) {
+ struct tm_node *tm_node;
+
+ tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
+ if (tm_node == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
+ free(tm_node);
+ }
+
+ /* Remove all WRED profiles */
+ for ( ; ; ) {
+ struct tm_wred_profile *wred_profile;
+
+ wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
+ if (wred_profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
+ free(wred_profile);
+ }
+
+ /* Remove all shared shapers */
+ for ( ; ; ) {
+ struct tm_shared_shaper *shared_shaper;
+
+ shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
+ if (shared_shaper == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
+ free(shared_shaper);
+ }
+
+ /* Remove all shaper profiles */
+ for ( ; ; ) {
+ struct tm_shaper_profile *shaper_profile;
+
+ shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
+ if (shaper_profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
+ shaper_profile, node);
+ free(shaper_profile);
+ }
+
+ memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+}
+
+int
+tm_init(struct pmd_internals *p,
+ struct pmd_params *params,
+ int numa_node)
+{
+ uint32_t enq_bsz = params->soft.tm.enq_bsz;
+ uint32_t deq_bsz = params->soft.tm.deq_bsz;
+
+ p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
+ 2 * enq_bsz * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.tm.pkts_enq == NULL)
+ return -ENOMEM;
+
+ p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
+ deq_bsz * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.tm.pkts_deq == NULL) {
+ rte_free(p->soft.tm.pkts_enq);
+ return -ENOMEM;
+ }
+
+ tm_hierarchy_init(p);
+
+ return 0;
+}
+
+void
+tm_free(struct pmd_internals *p)
+{
+ tm_hierarchy_uninit(p);
+ rte_free(p->soft.tm.pkts_enq);
+ rte_free(p->soft.tm.pkts_deq);
+}
+
+int
+tm_start(struct pmd_internals *p)
+{
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_subports, subport_id;
+ int status;
+
+ /* Is hierarchy frozen? */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return -1;
+
+ /* Port */
+ p->soft.tm.sched = rte_sched_port_config(&t->port_params);
+ if (p->soft.tm.sched == NULL)
+ return -1;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport =
+ t->port_params.n_pipes_per_subport;
+ uint32_t pipe_id;
+
+ status = rte_sched_subport_config(p->soft.tm.sched,
+ subport_id,
+ &t->subport_params[subport_id]);
+ if (status) {
+ rte_sched_port_free(p->soft.tm.sched);
+ return -1;
+ }
+
+ /* Pipe */
+ n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
+ pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
+
+ status = rte_sched_pipe_config(p->soft.tm.sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(p->soft.tm.sched);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+tm_stop(struct pmd_internals *p)
+{
+ if (p->soft.tm.sched)
+ rte_sched_port_free(p->soft.tm.sched);
+
+ /* Unfreeze hierarchy */
+ p->soft.tm.hierarchy_frozen = 0;
+}
+
+static struct tm_shaper_profile *
+tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
+ struct tm_shaper_profile *sp;
+
+ TAILQ_FOREACH(sp, spl, node)
+ if (shaper_profile_id == sp->shaper_profile_id)
+ return sp;
+
+ return NULL;
+}
+
+static struct tm_shared_shaper *
+tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
+ struct tm_shared_shaper *ss;
+
+ TAILQ_FOREACH(ss, ssl, node)
+ if (shared_shaper_id == ss->shared_shaper_id)
+ return ss;
+
+ return NULL;
+}
+
+static struct tm_wred_profile *
+tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
+ struct tm_wred_profile *wp;
+
+ TAILQ_FOREACH(wp, wpl, node)
+ if (wred_profile_id == wp->wred_profile_id)
+ return wp;
+
+ return NULL;
+}
+
+static struct tm_node *
+tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node)
+ if (n->node_id == node_id)
+ return n;
+
+ return NULL;
+}
+
+static struct tm_node *
+tm_root_node_present(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node)
+ if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
+ return n;
+
+ return NULL;
+}
+
+static uint32_t
+tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *ns;
+ uint32_t subport_id;
+
+ subport_id = 0;
+ TAILQ_FOREACH(ns, nl, node) {
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ if (ns->node_id == subport_node->node_id)
+ return subport_id;
+
+ subport_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *np;
+ uint32_t pipe_id;
+
+ pipe_id = 0;
+ TAILQ_FOREACH(np, nl, node) {
+ if (np->level != TM_NODE_LEVEL_PIPE ||
+ np->parent_node_id != pipe_node->parent_node_id)
+ continue;
+
+ if (np->node_id == pipe_node->node_id)
+ return pipe_id;
+
+ pipe_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
+{
+ return tc_node->priority;
+}
+
+static uint32_t
+tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *nq;
+ uint32_t queue_id;
+
+ queue_id = 0;
+ TAILQ_FOREACH(nq, nl, node) {
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node_id != queue_node->parent_node_id)
+ continue;
+
+ if (nq->node_id == queue_node->node_id)
+ return queue_id;
+
+ queue_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t n_queues_max = p->params.soft.tm.nb_queues;
+ uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+ uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_subports_max = n_pipes_max;
+ uint32_t n_root_max = 1;
+
+ switch (level) {
+ case TM_NODE_LEVEL_PORT:
+ return n_root_max;
+ case TM_NODE_LEVEL_SUBPORT:
+ return n_subports_max;
+ case TM_NODE_LEVEL_PIPE:
+ return n_pipes_max;
+ case TM_NODE_LEVEL_TC:
+ return n_tc_max;
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ return n_queues_max;
+ }
+}
+
+/* Traffic manager node type get */
+static int
+pmd_tm_node_type_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ if (is_leaf == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (node_id == RTE_TM_NODE_ID_NULL ||
+ (tm_node_search(dev, node_id) == NULL))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ *is_leaf = node_id < p->params.soft.tm.nb_queues;
+
+ return 0;
+}
+
+#ifdef RTE_SCHED_RED
+#define WRED_SUPPORTED 1
+#else
+#define WRED_SUPPORTED 0
+#endif
+
+#define STATS_MASK_DEFAULT \
+ (RTE_TM_STATS_N_PKTS | \
+ RTE_TM_STATS_N_BYTES | \
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+
+#define STATS_MASK_QUEUE \
+ (STATS_MASK_DEFAULT | \
+ RTE_TM_STATS_N_PKTS_QUEUED)
+
+static const struct rte_tm_capabilities tm_cap = {
+ .n_nodes_max = UINT32_MAX,
+ .n_levels_max = TM_NODE_LEVEL_MAX,
+
+ .non_leaf_nodes_identical = 0,
+ .leaf_nodes_identical = 1,
+
+ .shaper_n_max = UINT32_MAX,
+ .shaper_private_n_max = UINT32_MAX,
+ .shaper_private_dual_rate_n_max = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+
+ .shaper_shared_n_max = UINT32_MAX,
+ .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
+ .shaper_shared_n_shapers_per_node_max = 1,
+ .shaper_shared_dual_rate_n_max = 0,
+ .shaper_shared_rate_min = 1,
+ .shaper_shared_rate_max = UINT32_MAX,
+
+ .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+ .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+
+ .cman_head_drop_supported = 0,
+ .cman_wred_context_n_max = 0,
+ .cman_wred_context_private_n_max = 0,
+ .cman_wred_context_shared_n_max = 0,
+ .cman_wred_context_shared_n_nodes_per_context_max = 0,
+ .cman_wred_context_shared_n_contexts_per_node_max = 0,
+
+ .mark_vlan_dei_supported = {0, 0, 0},
+ .mark_ip_ecn_tcp_supported = {0, 0, 0},
+ .mark_ip_ecn_sctp_supported = {0, 0, 0},
+ .mark_ip_dscp_supported = {0, 0, 0},
+
+ .dynamic_update_mask = 0,
+
+ .stats_mask = STATS_MASK_QUEUE,
+};
+
+/* Traffic manager capabilities get */
+static int
+pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_cap, sizeof(*cap));
+
+ cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+ cap->shaper_private_n_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
+
+ cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
+
+ cap->shaper_n_max = cap->shaper_private_n_max +
+ cap->shaper_shared_n_max;
+
+ cap->shaper_shared_n_nodes_per_shaper_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
+
+ cap->sched_n_children_max = RTE_MAX(
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
+ (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
+
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+
+ if (WRED_SUPPORTED)
+ cap->cman_wred_context_private_n_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+ cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
+ cap->cman_wred_context_shared_n_max;
+
+ return 0;
+}
+
+static const struct rte_tm_level_capabilities tm_level_cap[] = {
+ [TM_NODE_LEVEL_PORT] = {
+ .n_nodes_max = 1,
+ .n_nodes_nonleaf_max = 1,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = 1,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_SUBPORT] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ .sched_wfq_weight_max = UINT32_MAX,
+#else
+ .sched_wfq_weight_max = 1,
+#endif
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_PIPE] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_sp_n_priorities_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = 1,
+ .sched_wfq_n_groups_max = 0,
+ .sched_wfq_weight_max = 1,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_TC] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 1,
+
+ .sched_n_children_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_QUEUE] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = 0,
+ .n_nodes_leaf_max = UINT32_MAX,
+ .non_leaf_nodes_identical = 0,
+ .leaf_nodes_identical = 1,
+
+ .leaf = {
+ .shaper_private_supported = 0,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 0,
+ .shaper_private_rate_max = 0,
+ .shaper_shared_n_max = 0,
+
+ .cman_head_drop_supported = 0,
+ .cman_wred_context_private_supported = WRED_SUPPORTED,
+ .cman_wred_context_shared_n_max = 0,
+
+ .stats_mask = STATS_MASK_QUEUE,
+ },
+ },
+};
+
+/* Traffic manager level capabilities get */
+static int
+pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (level_id >= TM_NODE_LEVEL_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
+
+ switch (level_id) {
+ case TM_NODE_LEVEL_PORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ break;
+
+ case TM_NODE_LEVEL_TC:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_TC);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ break;
+
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_QUEUE);
+ cap->n_nodes_leaf_max = cap->n_nodes_max;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct rte_tm_node_capabilities tm_node_cap[] = {
+ [TM_NODE_LEVEL_PORT] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .nonleaf = {
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = 1,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_SUBPORT] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .nonleaf = {
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_PIPE] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .nonleaf = {
+ .sched_n_children_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_sp_n_priorities_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = 1,
+ .sched_wfq_n_groups_max = 0,
+ .sched_wfq_weight_max = 1,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_TC] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 1,
+
+ .nonleaf = {
+ .sched_n_children_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_QUEUE] = {
+ .shaper_private_supported = 0,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 0,
+ .shaper_private_rate_max = 0,
+ .shaper_shared_n_max = 0,
+
+ .leaf = {
+ .cman_head_drop_supported = 0,
+ .cman_wred_context_private_supported = WRED_SUPPORTED,
+ .cman_wred_context_shared_n_max = 0,
+ },
+
+ .stats_mask = STATS_MASK_QUEUE,
+ },
+};
+
+/* Traffic manager node capabilities get */
+static int
+pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct tm_node *tm_node;
+
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ tm_node = tm_node_search(dev, node_id);
+ if (tm_node == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
+
+ switch (tm_node->level) {
+ case TM_NODE_LEVEL_PORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ case TM_NODE_LEVEL_TC:
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+shaper_profile_check(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct tm_shaper_profile *sp;
+
+ /* Shaper profile ID must not be NONE. */
+ if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper profile must not exist. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak rate: non-zero, 32-bit */
+ if (profile->peak.rate == 0 ||
+ profile->peak.rate >= UINT32_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak size: non-zero, 32-bit */
+ if (profile->peak.size == 0 ||
+ profile->peak.size >= UINT32_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Dual-rate profiles are not supported. */
+ if (profile->committed.rate != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* Packet length adjust: 24 bytes (Ethernet framing overhead + FCS) */
+ if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
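+
+/* Informative example (not part of the driver): a shaper profile passing the
+ * above checks could look like:
+ *
+ *	struct rte_tm_shaper_params profile = {
+ *		.committed = {.rate = 0, .size = 0},
+ *		.peak = {.rate = 1250000, .size = 1000000},
+ *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+ *	};
+ *
+ * Rates and bucket sizes are given in bytes per second and bytes (1250000
+ * bytes/s is 10 Mbit/s); the committed rate stays zero because dual-rate
+ * shapers are not supported, and the packet length adjust must equal the
+ * 24-byte Ethernet framing overhead including FCS.
+ */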
+
+/* Traffic manager shaper profile add */
+static int
+pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
+ struct tm_shaper_profile *sp;
+ int status;
+
+ /* Check input params */
+ status = shaper_profile_check(dev, shaper_profile_id, profile, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ sp = calloc(1, sizeof(struct tm_shaper_profile));
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ sp->shaper_profile_id = shaper_profile_id;
+ memcpy(&sp->params, profile, sizeof(sp->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(spl, sp, node);
+ p->soft.tm.h.n_shaper_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager shaper profile delete */
+static int
+pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile *sp;
+
+ /* Check existing */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (sp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
+ p->soft.tm.h.n_shaper_profiles--;
+ free(sp);
+
+ return 0;
+}
+
+static struct tm_node *
+tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
+ struct tm_shared_shaper *ss)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+	/* Find the first TC node that uses the given shared shaper. */
+ TAILQ_FOREACH(n, nl, node) {
+ if (n->level != TM_NODE_LEVEL_TC ||
+ n->params.n_shared_shapers == 0 ||
+ n->params.shared_shaper_id[0] != ss->shared_shaper_id)
+ continue;
+
+ return n;
+ }
+
+ return NULL;
+}
+
+static int
+update_subport_tc_rate(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct tm_shared_shaper *ss,
+ struct tm_shaper_profile *sp_new)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_subport_params subport_params;
+
+ struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
+ ss->shaper_profile_id);
+
+ /* Derive new subport configuration. */
+ memcpy(&subport_params,
+ &p->soft.tm.params.subport_params[subport_id],
+ sizeof(subport_params));
+ subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
+
+ /* Update the subport configuration. */
+ if (rte_sched_subport_config(p->soft.tm.sched,
+ subport_id, &subport_params))
+ return -1;
+
+ /* Commit changes. */
+ sp_old->n_users--;
+
+ ss->shaper_profile_id = sp_new->shaper_profile_id;
+ sp_new->n_users++;
+
+ memcpy(&p->soft.tm.params.subport_params[subport_id],
+ &subport_params,
+ sizeof(subport_params));
+
+ return 0;
+}
+
+/* Traffic manager shared shaper add/update */
+static int
+pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper *ss;
+ struct tm_shaper_profile *sp;
+ struct tm_node *nt;
+
+ /* Shaper profile must be valid. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* Add new shared shaper. */
+ ss = tm_shared_shaper_search(dev, shared_shaper_id);
+ if (ss == NULL) {
+ struct tm_shared_shaper_list *ssl =
+ &p->soft.tm.h.shared_shapers;
+
+ /* Hierarchy must not be frozen */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Memory allocation */
+ ss = calloc(1, sizeof(struct tm_shared_shaper));
+ if (ss == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ ss->shared_shaper_id = shared_shaper_id;
+ ss->shaper_profile_id = shaper_profile_id;
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(ssl, ss, node);
+ p->soft.tm.h.n_shared_shapers++;
+
+ return 0;
+ }
+
+	/* Update existing shared shaper. */
+ /* Hierarchy must be frozen (run-time update) */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Propagate change. */
+ nt = tm_shared_shaper_get_tc(dev, ss);
+ if (update_subport_tc_rate(dev, nt, ss, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
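+
+/* The add/update callback above takes two distinct paths: before the
+ * hierarchy is committed it only records a new shared shaper, while after the
+ * commit it only accepts an update of an existing shared shaper and pushes
+ * the new rate down to the subport traffic class via
+ * rte_sched_subport_config().
+ */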
+
+/* Traffic manager shared shaper delete */
+static int
+pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper *ss;
+
+ /* Check existing */
+ ss = tm_shared_shaper_search(dev, shared_shaper_id);
+ if (ss == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (ss->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
+ p->soft.tm.h.n_shared_shapers--;
+ free(ss);
+
+ return 0;
+}
+
+static int
+wred_profile_check(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct tm_wred_profile *wp;
+ enum rte_tm_color color;
+
+ /* WRED profile ID must not be NONE. */
+ if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WRED profile must not exist. */
+ wp = tm_wred_profile_search(dev, wred_profile_id);
+ if (wp)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* min_th <= max_th, max_th > 0 */
+ for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+ uint16_t min_th = profile->red_params[color].min_th;
+ uint16_t max_th = profile->red_params[color].max_th;
+
+ if (min_th > max_th || max_th == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
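+
+/* Informative example (not part of the driver): per-color RED thresholds that
+ * satisfy the above checks could be, e.g.:
+ *
+ *	.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9
+ *
+ * i.e. min_th <= max_th and max_th > 0 for every color; the parameters are
+ * later copied verbatim into the librte_sched port configuration.
+ */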
+
+/* Traffic manager WRED profile add */
+static int
+pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
+ struct tm_wred_profile *wp;
+ int status;
+
+ /* Check input params */
+ status = wred_profile_check(dev, wred_profile_id, profile, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ wp = calloc(1, sizeof(struct tm_wred_profile));
+ if (wp == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ wp->wred_profile_id = wred_profile_id;
+ memcpy(&wp->params, profile, sizeof(wp->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(wpl, wp, node);
+ p->soft.tm.h.n_wred_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager WRED profile delete */
+static int
+pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile *wp;
+
+ /* Check existing */
+ wp = tm_wred_profile_search(dev, wred_profile_id);
+ if (wp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (wp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
+ p->soft.tm.h.n_wred_profiles--;
+ free(wp);
+
+ return 0;
+}
+
+static int
+node_add_check_port(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
+ params->shaper_profile_id);
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid.
+ * Shaper profile peak rate must fit the configured port rate.
+ */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ sp == NULL ||
+ sp->params.peak.rate > p->params.soft.tm.rate)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_subport(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_pipe(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 4 */
+ if (params->nonleaf.n_sp_priorities !=
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WFQ mode must be byte mode */
+ if (params->nonleaf.wfq_weight_mode != NULL &&
+ params->nonleaf.wfq_weight_mode[0] != 0 &&
+ params->nonleaf.wfq_weight_mode[1] != 0 &&
+ params->nonleaf.wfq_weight_mode[2] != 0 &&
+ params->nonleaf.wfq_weight_mode[3] != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_tc(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority __rte_unused,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Single valid shared shaper */
+ if (params->n_shared_shapers > 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (params->n_shared_shapers == 1 &&
+ (params->shared_shaper_id == NULL ||
+ (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_queue(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: leaf */
+ if (node_id >= p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shaper */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Congestion management must not be head drop */
+ if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Congestion management set to WRED */
+ if (params->leaf.cman == RTE_TM_CMAN_WRED) {
+ uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
+ struct tm_wred_profile *wp = tm_wred_profile_search(dev,
+ wred_profile_id);
+
+ /* WRED profile (for private WRED context) must be valid */
+ if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
+ wp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared WRED contexts */
+ if (params->leaf.wred.n_shared_wred_contexts != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_QUEUE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
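+
+/* Node ID convention enforced by the per-level checks above: IDs lower than
+ * the number of scheduler queues (p->params.soft.tm.nb_queues) are reserved
+ * for leaf (queue) nodes, while port, subport, pipe and TC nodes must use IDs
+ * at or above that value.
+ */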
+
+static int
+node_add_check(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct tm_node *pn;
+ uint32_t level;
+ int status;
+
+ /* node_id, parent_node_id:
+ * -node_id must not be RTE_TM_NODE_ID_NULL
+ * -node_id must not be in use
+ * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
+ * -root node must not exist
+ * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
+ * -parent_node_id must be valid
+ */
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (tm_node_search(dev, node_id))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ pn = NULL;
+ if (tm_root_node_present(dev))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ } else {
+ pn = tm_node_search(dev, parent_node_id);
+ if (pn == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* priority: must be 0 .. 3 */
+ if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* weight: must be non-zero and less than UINT8_MAX (1 .. 254) */
+ if (weight == 0 || weight >= UINT8_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* level_id: if valid, then
+ * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
+ * -level_id must be zero
+ * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
+ * -level_id must be parent level ID plus one
+ */
+ level = (pn == NULL) ? 0 : pn->level + 1;
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: must not be NULL */
+ if (params == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: per level checks */
+ switch (level) {
+ case TM_NODE_LEVEL_PORT:
+ status = node_add_check_port(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ status = node_add_check_subport(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ status = node_add_check_pipe(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_TC:
+ status = node_add_check_tc(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_QUEUE:
+ status = node_add_check_queue(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
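+
+/* The level of a new node is always derived from its parent: the root node
+ * sits at the port level and every other node sits one level below its
+ * parent. An explicit level_id, when given, is only cross-checked against the
+ * derived level and is never used to place the node.
+ */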
+
+/* Traffic manager node add */
+static int
+pmd_tm_node_add(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+ uint32_t i;
+ int status;
+
+ /* Checks */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ status = node_add_check(dev, node_id, parent_node_id, priority, weight,
+ level_id, params, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ n = calloc(1, sizeof(struct tm_node));
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ n->node_id = node_id;
+ n->parent_node_id = parent_node_id;
+ n->priority = priority;
+ n->weight = weight;
+
+ if (parent_node_id != RTE_TM_NODE_ID_NULL) {
+ n->parent_node = tm_node_search(dev, parent_node_id);
+ n->level = n->parent_node->level + 1;
+ }
+
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+ n->shaper_profile = tm_shaper_profile_search(dev,
+ params->shaper_profile_id);
+
+ if (n->level == TM_NODE_LEVEL_QUEUE &&
+ params->leaf.cman == RTE_TM_CMAN_WRED)
+ n->wred_profile = tm_wred_profile_search(dev,
+ params->leaf.wred.wred_profile_id);
+
+ memcpy(&n->params, params, sizeof(n->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(nl, n, node);
+ p->soft.tm.h.n_nodes++;
+
+ /* Update dependencies */
+ if (n->parent_node)
+ n->parent_node->n_children++;
+
+ if (n->shaper_profile)
+ n->shaper_profile->n_users++;
+
+ for (i = 0; i < params->n_shared_shapers; i++) {
+ struct tm_shared_shaper *ss;
+
+ ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
+ ss->n_users++;
+ }
+
+ if (n->wred_profile)
+ n->wred_profile->n_users++;
+
+ p->soft.tm.h.n_tm_nodes[n->level]++;
+
+ return 0;
+}
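+
+/* Informative sketch (not part of the driver): an application builds the
+ * hierarchy top-down through the generic ethdev TM API, which ends up in the
+ * callback above, e.g.:
+ *
+ *	rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
+ *		RTE_TM_NODE_LEVEL_ID_ANY, &port_params, &error);
+ *	rte_tm_node_add(port_id, subport_id, root_id, 0, 1,
+ *		RTE_TM_NODE_LEVEL_ID_ANY, &subport_params, &error);
+ *	...
+ *
+ * followed by pipe, TC and queue nodes, with root_id, subport_id and the
+ * parameter structures chosen by the application under the node ID convention
+ * described earlier.
+ */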
+
+/* Traffic manager node delete */
+static int
+pmd_tm_node_delete(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node *n;
+ uint32_t i;
+
+ /* Check hierarchy changes are currently allowed */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Check existing */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (n->n_children)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Update dependencies */
+ p->soft.tm.h.n_tm_nodes[n->level]--;
+
+ if (n->wred_profile)
+ n->wred_profile->n_users--;
+
+ for (i = 0; i < n->params.n_shared_shapers; i++) {
+ struct tm_shared_shaper *ss;
+
+ ss = tm_shared_shaper_search(dev,
+ n->params.shared_shaper_id[i]);
+ ss->n_users--;
+ }
+
+ if (n->shaper_profile)
+ n->shaper_profile->n_users--;
+
+ if (n->parent_node)
+ n->parent_node->n_children--;
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
+ p->soft.tm.h.n_nodes--;
+ free(n);
+
+ return 0;
+}
+
+static void
+pipe_profile_build(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct rte_sched_pipe_params *pp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *nt, *nq;
+
+ memset(pp, 0, sizeof(*pp));
+
+ /* Pipe */
+ pp->tb_rate = np->shaper_profile->params.peak.rate;
+ pp->tb_size = np->shaper_profile->params.peak.size;
+
+ /* Traffic Class (TC) */
+ pp->tc_period = PIPE_TC_PERIOD;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ pp->tc_ov_weight = np->weight;
+#endif
+
+ TAILQ_FOREACH(nt, nl, node) {
+ uint32_t queue_id = 0;
+
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node_id != np->node_id)
+ continue;
+
+ pp->tc_rate[nt->priority] =
+ nt->shaper_profile->params.peak.rate;
+
+ /* Queue */
+ TAILQ_FOREACH(nq, nl, node) {
+ uint32_t pipe_queue_id;
+
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node_id != nt->node_id)
+ continue;
+
+ pipe_queue_id = nt->priority *
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+ pp->wrr_weights[pipe_queue_id] = nq->weight;
+
+ queue_id++;
+ }
+ }
+}
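+
+/* For illustration: with the fixed 4 x 4 layout (4 traffic classes per pipe,
+ * 4 queues per traffic class), the second queue (queue_id 1) of the TC with
+ * priority 2 lands at wrr_weights[2 * 4 + 1] = wrr_weights[9].
+ */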
+
+static int
+pipe_profile_free_exists(struct rte_eth_dev *dev,
+ uint32_t *pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
+ *pipe_profile_id = t->n_pipe_profiles;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+pipe_profile_exists(struct rte_eth_dev *dev,
+ struct rte_sched_pipe_params *pp,
+ uint32_t *pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t i;
+
+ for (i = 0; i < t->n_pipe_profiles; i++)
+ if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
+ if (pipe_profile_id)
+ *pipe_profile_id = i;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+pipe_profile_install(struct rte_eth_dev *dev,
+ struct rte_sched_pipe_params *pp,
+ uint32_t pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
+ t->n_pipe_profiles++;
+}
+
+static void
+pipe_profile_mark(struct rte_eth_dev *dev,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_pipes_per_subport, pos;
+
+ n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+ pos = subport_id * n_pipes_per_subport + pipe_id;
+
+ t->pipe_to_profile[pos] = pipe_profile_id;
+}
+
+static struct rte_sched_pipe_params *
+pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
+ uint32_t pipe_profile_id = t->pipe_to_profile[pos];
+
+ return &t->pipe_profiles[pipe_profile_id];
+}
+
+static int
+pipe_profiles_generate(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *ns, *np;
+ uint32_t subport_id;
+
+ /* Objective: Fill in the following fields in struct tm_params:
+ * - pipe_profiles
+ * - n_pipe_profiles
+ * - pipe_to_profile
+ */
+
+ subport_id = 0;
+ TAILQ_FOREACH(ns, nl, node) {
+ uint32_t pipe_id;
+
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ pipe_id = 0;
+ TAILQ_FOREACH(np, nl, node) {
+ struct rte_sched_pipe_params pp;
+ uint32_t pos;
+
+ if (np->level != TM_NODE_LEVEL_PIPE ||
+ np->parent_node_id != ns->node_id)
+ continue;
+
+ pipe_profile_build(dev, np, &pp);
+
+ if (!pipe_profile_exists(dev, &pp, &pos)) {
+ if (!pipe_profile_free_exists(dev, &pos))
+ return -1;
+
+ pipe_profile_install(dev, &pp, pos);
+ }
+
+ pipe_profile_mark(dev, subport_id, pipe_id, pos);
+
+ pipe_id++;
+ }
+
+ subport_id++;
+ }
+
+ return 0;
+}
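+
+/* Pipes that end up with identical parameters share a single pipe profile: a
+ * new profile is installed only when no existing one matches, and the total
+ * number of profiles is capped at RTE_SCHED_PIPE_PROFILES_PER_PORT.
+ */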
+
+static struct tm_wred_profile *
+tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *nq;
+
+ TAILQ_FOREACH(nq, nl, node) {
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node->priority != tc_id)
+ continue;
+
+ return nq->wred_profile;
+ }
+
+ return NULL;
+}
+
+#ifdef RTE_SCHED_RED
+
+static void
+wred_profiles_set(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
+ uint32_t tc_id;
+ enum rte_tm_color color;
+
+ for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
+ for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+ struct rte_red_params *dst =
+ &pp->red_params[tc_id][color];
+ struct tm_wred_profile *src_wp =
+ tm_tc_wred_profile_get(dev, tc_id);
+ struct rte_tm_red_params *src =
+ &src_wp->params.red_params[color];
+
+ memcpy(dst, src, sizeof(*dst));
+ }
+}
+
+#else
+
+#define wred_profiles_set(dev)
+
+#endif
+
+static struct tm_shared_shaper *
+tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
+{
+ return (tc_node->params.n_shared_shapers) ?
+ tm_shared_shaper_search(dev,
+ tc_node->params.shared_shaper_id[0]) :
+ NULL;
+}
+
+static struct tm_shared_shaper *
+tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
+ struct tm_node *subport_node,
+ uint32_t tc_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node) {
+ if (n->level != TM_NODE_LEVEL_TC ||
+ n->parent_node->parent_node_id !=
+ subport_node->node_id ||
+ n->priority != tc_id)
+ continue;
+
+ return tm_tc_shared_shaper_get(dev, n);
+ }
+
+ return NULL;
+}
+
+static int
+hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_shared_shaper_list *ssl = &h->shared_shapers;
+ struct tm_wred_profile_list *wpl = &h->wred_profiles;
+ struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
+ struct tm_shared_shaper *ss;
+
+ uint32_t n_pipes_per_subport;
+
+ /* Root node exists. */
+ if (nr == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* There is at least one subport, max is not exceeded. */
+ if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* There is at least one pipe. */
+ if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of pipes is the same for all subports. Maximum number of pipes
+ * per subport is not exceeded.
+ */
+ n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ TAILQ_FOREACH(ns, nl, node) {
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ if (ns->n_children != n_pipes_per_subport)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
+ TAILQ_FOREACH(np, nl, node) {
+ uint32_t mask = 0, mask_expected =
+ RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ uint32_t);
+
+ if (np->level != TM_NODE_LEVEL_PIPE)
+ continue;
+
+ if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node_id != np->node_id)
+ continue;
+
+ mask |= 1 << nt->priority;
+ }
+
+ if (mask != mask_expected)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Each TC has exactly 4 packet queues. */
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC)
+ continue;
+
+ if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+	/*
+ * Shared shapers:
+ * -For each TC #i, all pipes in the same subport use the same
+ * shared shaper (or no shared shaper) for their TC#i.
+ * -Each shared shaper needs to have at least one user. All its
+ * users have to be TC nodes with the same priority and the same
+ * subport.
+ */
+ TAILQ_FOREACH(ns, nl, node) {
+ struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t id;
+
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
+ s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
+
+ TAILQ_FOREACH(nt, nl, node) {
+ struct tm_shared_shaper *subport_ss, *tc_ss;
+
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node->parent_node_id !=
+ ns->node_id)
+ continue;
+
+ subport_ss = s[nt->priority];
+ tc_ss = tm_tc_shared_shaper_get(dev, nt);
+
+ if (subport_ss == NULL && tc_ss == NULL)
+ continue;
+
+ if ((subport_ss == NULL && tc_ss != NULL) ||
+ (subport_ss != NULL && tc_ss == NULL) ||
+ subport_ss->shared_shaper_id !=
+ tc_ss->shared_shaper_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+
+ TAILQ_FOREACH(ss, ssl, node) {
+ struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
+ uint32_t n_users = 0;
+
+ if (nt_any != NULL)
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->priority != nt_any->priority ||
+ nt->parent_node->parent_node_id !=
+ nt_any->parent_node->parent_node_id)
+ continue;
+
+ n_users++;
+ }
+
+ if (ss->n_users == 0 || ss->n_users != n_users)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+	/* Generate pipe profiles; fail if their number exceeds the limit. */
+ if (pipe_profiles_generate(dev))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/*
+ * WRED (when used, i.e. at least one WRED profile defined):
+ * -Each WRED profile must have at least one user.
+ * -All leaf nodes must have their private WRED context enabled.
+ * -For each TC #i, all leaf nodes must use the same WRED profile
+ * for their private WRED context.
+ */
+ if (h->n_wred_profiles) {
+ struct tm_wred_profile *wp;
+ struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t id;
+
+ TAILQ_FOREACH(wp, wpl, node)
+ if (wp->n_users == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
+ w[id] = tm_tc_wred_profile_get(dev, id);
+
+ if (w[id] == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ TAILQ_FOREACH(nq, nl, node) {
+ uint32_t id;
+
+ if (nq->level != TM_NODE_LEVEL_QUEUE)
+ continue;
+
+ id = nq->parent_node->priority;
+
+ if (nq->wred_profile == NULL ||
+ nq->wred_profile->wred_profile_id !=
+ w[id]->wred_profile_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+
+ return 0;
+}
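+
+/* Shape accepted by the commit check above: one port node, at least one and
+ * at most TM_MAX_SUBPORTS subports, the same number of pipes under every
+ * subport (at most TM_MAX_PIPES_PER_SUBPORT), exactly one TC per priority
+ * under every pipe and exactly 4 queues under every TC, plus the shared
+ * shaper and WRED consistency rules spelled out in the comments above.
+ */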
+
+static void
+hierarchy_blueprints_create(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *root = tm_root_node_present(dev), *n;
+
+ uint32_t subport_id;
+
+ t->port_params = (struct rte_sched_port_params) {
+ .name = dev->data->name,
+ .socket = dev->data->numa_node,
+ .rate = root->shaper_profile->params.peak.rate,
+ .mtu = dev->data->mtu,
+ .frame_overhead =
+ root->shaper_profile->params.pkt_length_adjust,
+ .n_subports_per_port = root->n_children,
+ .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
+ .qsize = {p->params.soft.tm.qsize[0],
+ p->params.soft.tm.qsize[1],
+ p->params.soft.tm.qsize[2],
+ p->params.soft.tm.qsize[3],
+ },
+ .pipe_profiles = t->pipe_profiles,
+ .n_pipe_profiles = t->n_pipe_profiles,
+ };
+
+ wred_profiles_set(dev);
+
+ subport_id = 0;
+ TAILQ_FOREACH(n, nl, node) {
+ uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t i;
+
+ if (n->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ struct tm_shared_shaper *ss;
+ struct tm_shaper_profile *sp;
+
+ ss = tm_subport_tc_shared_shaper_get(dev, n, i);
+ sp = (ss) ? tm_shaper_profile_search(dev,
+ ss->shaper_profile_id) :
+ n->shaper_profile;
+ tc_rate[i] = sp->params.peak.rate;
+ }
+
+ t->subport_params[subport_id] =
+ (struct rte_sched_subport_params) {
+ .tb_rate = n->shaper_profile->params.peak.rate,
+ .tb_size = n->shaper_profile->params.peak.size,
+
+ .tc_rate = {tc_rate[0],
+ tc_rate[1],
+ tc_rate[2],
+ tc_rate[3],
+ },
+ .tc_period = SUBPORT_TC_PERIOD,
+ };
+
+ subport_id++;
+ }
+}
+
+/* Traffic manager hierarchy commit */
+static int
+pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ int status;
+
+ /* Checks */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ status = hierarchy_commit_check(dev, error);
+ if (status) {
+ if (clear_on_fail) {
+ tm_hierarchy_uninit(p);
+ tm_hierarchy_init(p);
+ }
+
+ return status;
+ }
+
+ /* Create blueprints */
+ hierarchy_blueprints_create(dev);
+
+ /* Freeze hierarchy */
+ p->soft.tm.hierarchy_frozen = 1;
+
+ return 0;
+}
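+
+/* Informative example (not part of the driver): once all nodes are in place,
+ * the application freezes the hierarchy with
+ *
+ *	rte_tm_hierarchy_commit(port_id, 1, &error);
+ *
+ * where a non-zero clear_on_fail asks the driver to wipe and re-initialize
+ * the hierarchy when validation fails, as implemented above. After a
+ * successful commit only run-time updates (shaper, weight, shared shaper) are
+ * accepted; structural changes are rejected with EBUSY.
+ */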
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+
+static int
+update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tc_ov_weight = (uint8_t)weight;
+
+	/* Since the implementation does not allow adding more pipe profiles
+	 * after port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set of
+ * pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ np->weight = weight;
+
+ return 0;
+}
+
+#endif
+
+static int
+update_queue_weight(struct rte_eth_dev *dev,
+ struct tm_node *nq, uint32_t weight)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t queue_id = tm_node_queue_id(dev, nq);
+
+ struct tm_node *nt = nq->parent_node;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t pipe_queue_id =
+ tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
+
+	/* Since the implementation does not allow adding more pipe profiles
+	 * after port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set
+ * of pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ nq->weight = weight;
+
+ return 0;
+}
+
+/* Traffic manager node parent update */
+static int
+pmd_tm_node_parent_update(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Parent node must be the same */
+ if (n->parent_node_id != parent_node_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be the same */
+ if (n->priority != priority)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* weight: must be non-zero and less than UINT8_MAX (1 .. 254) */
+ if (weight == 0 || weight >= UINT8_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_SUBPORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_PIPE:
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ if (update_pipe_weight(dev, n, weight))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+#else
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+#endif
+ /* fall-through */
+ case TM_NODE_LEVEL_TC:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_QUEUE:
+ /* fall-through */
+ default:
+ if (update_queue_weight(dev, n, weight))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ }
+}
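+
+/* Despite its name, the callback above cannot re-parent a node: the parent
+ * node and priority must match the current values, so it effectively acts as
+ * a weight update, supported only for pipe nodes (TC oversubscription weight,
+ * when RTE_SCHED_SUBPORT_TC_OV is enabled) and for queue nodes (WRR weight).
+ */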
+
+static int
+update_subport_rate(struct rte_eth_dev *dev,
+ struct tm_node *ns,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_subport_params subport_params;
+
+ /* Derive new subport configuration. */
+ memcpy(&subport_params,
+ &p->soft.tm.params.subport_params[subport_id],
+ sizeof(subport_params));
+ subport_params.tb_rate = sp->params.peak.rate;
+ subport_params.tb_size = sp->params.peak.size;
+
+ /* Update the subport configuration. */
+ if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
+ &subport_params))
+ return -1;
+
+ /* Commit changes. */
+ ns->shaper_profile->n_users--;
+
+ ns->shaper_profile = sp;
+ ns->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ memcpy(&p->soft.tm.params.subport_params[subport_id],
+ &subport_params,
+ sizeof(subport_params));
+
+ return 0;
+}
+
+static int
+update_pipe_rate(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tb_rate = sp->params.peak.rate;
+ profile1.tb_size = sp->params.peak.size;
+
+	/* Since the implementation does not allow adding more pipe profiles
+	 * after port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set of
+ * pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ np->shaper_profile->n_users--;
+ np->shaper_profile = sp;
+ np->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ return 0;
+}
+
+static int
+update_tc_rate(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tc_rate[tc_id] = sp->params.peak.rate;
+
+	/* Since the implementation does not allow adding more pipe profiles
+	 * after port configuration, the pipe configuration can be successfully
+ * updated only if the new profile is also part of the existing set of
+ * pipe profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ nt->shaper_profile->n_users--;
+ nt->shaper_profile = sp;
+ nt->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ return 0;
+}
+
+/* Traffic manager node shaper update */
+static int
+pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+ struct tm_shaper_profile *sp;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper profile must be valid. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ /* fall-through */
+ case TM_NODE_LEVEL_SUBPORT:
+ if (update_subport_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ /* fall-through */
+ case TM_NODE_LEVEL_PIPE:
+ if (update_pipe_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ /* fall-through */
+ case TM_NODE_LEVEL_TC:
+ if (update_tc_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ /* fall-through */
+ case TM_NODE_LEVEL_QUEUE:
+ /* fall-through */
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+}
+
+static inline uint32_t
+tm_port_queue_id(struct rte_eth_dev *dev,
+ uint32_t port_subport_id,
+ uint32_t subport_pipe_id,
+ uint32_t pipe_tc_id,
+ uint32_t tc_queue_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ uint32_t port_pipe_id =
+ port_subport_id * n_pipes_per_subport + subport_pipe_id;
+ uint32_t port_tc_id =
+ port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
+ uint32_t port_queue_id =
+ port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
+
+ return port_queue_id;
+}
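+
+/* For illustration, assuming a hypothetical layout of 8 pipes per subport:
+ * queue 1 of TC 2 of pipe 3 of subport 1 maps to
+ * port_pipe_id = 1 * 8 + 3 = 11, port_tc_id = 11 * 4 + 2 = 46 and
+ * port_queue_id = 46 * 4 + 1 = 185.
+ */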
+
+static int
+read_port_stats(struct rte_eth_dev *dev,
+ struct tm_node *nr,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+ uint32_t subport_id;
+
+ for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
+ struct rte_sched_subport_stats s;
+ uint32_t tc_ov, id;
+
+ /* Stats read */
+ int status = rte_sched_subport_read_stats(
+ p->soft.tm.sched,
+ subport_id,
+ &s,
+ &tc_ov);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
+ nr->stats.n_pkts +=
+ s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
+ nr->stats.n_bytes +=
+ s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
+ nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ s.n_pkts_tc_dropped[id];
+ nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_tc_dropped[id];
+ }
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nr->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nr->stats, 0, sizeof(nr->stats));
+
+ return 0;
+}
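+
+/* Port-level stats are built by accumulating the stats of all subports; since
+ * librte_sched does not report drops per color, every drop is accounted
+ * against RTE_TM_GREEN here and in the other stats readers below.
+ */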
+
+static int
+read_subport_stats(struct rte_eth_dev *dev,
+ struct tm_node *ns,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+ struct rte_sched_subport_stats s;
+ uint32_t tc_ov, tc_id;
+
+ /* Stats read */
+ int status = rte_sched_subport_read_stats(
+ p->soft.tm.sched,
+ subport_id,
+ &s,
+ &tc_ov);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
+ ns->stats.n_pkts +=
+ s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
+ ns->stats.n_bytes +=
+ s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
+ ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ s.n_pkts_tc_dropped[tc_id];
+ ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_tc_dropped[tc_id];
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &ns->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&ns->stats, 0, sizeof(ns->stats));
+
+ return 0;
+}
+
+static int
+read_pipe_stats(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t i;
+
+ /* Stats read */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+
+ int status = rte_sched_queue_read_stats(
+ p->soft.tm.sched,
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ np->stats.leaf.n_pkts_queued = qlen;
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &np->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ return 0;
+}
+
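+/*
+ * Traffic class stats aggregate the RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
+ * queues that belong to this TC of the parent pipe.
+ */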
+static int
+read_tc_stats(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t i;
+
+ /* Stats read */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ i);
+
+ int status = rte_sched_queue_read_stats(
+ p->soft.tm.sched,
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_queued = qlen;
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nt->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nt->stats, 0, sizeof(nt->stats));
+
+ return 0;
+}
+
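+/*
+ * Queue (leaf) stats come straight from the matching scheduler queue. The
+ * reported mask is STATS_MASK_QUEUE rather than STATS_MASK_DEFAULT, as the
+ * queue occupancy (qlen) is also reported at this level.
+ */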
+static int
+read_queue_stats(struct rte_eth_dev *dev,
+ struct tm_node *nq,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t queue_id = tm_node_queue_id(dev, nq);
+
+ struct tm_node *nt = nq->parent_node;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ /* Stats read */
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ queue_id);
+
+ int status = rte_sched_queue_read_stats(
+ p->soft.tm.sched,
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ nq->stats.leaf.n_pkts_queued = qlen;
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nq->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_QUEUE;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nq->stats, 0, sizeof(nq->stats));
+
+ return 0;
+}
+
+/* Traffic manager read statistics counters for a specific node */
+static int
+pmd_tm_node_stats_read(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ if (read_port_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ if (read_subport_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_PIPE:
+ if (read_pipe_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_TC:
+ if (read_tc_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ if (read_queue_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ }
+}
+
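+/*
+ * Soft NIC traffic manager ops, plugged into the generic ethdev TM API
+ * (rte_tm.h). Callbacks left NULL (node suspend/resume, shared WRED
+ * contexts, per-node stats/WRED/CMAN updates, etc.) are features this
+ * driver does not implement. For example, rte_tm_node_stats_read() on a
+ * softnic port ends up calling the node_stats_read callback below.
+ */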
+const struct rte_tm_ops pmd_tm_ops = {
+ .node_type_get = pmd_tm_node_type_get,
+ .capabilities_get = pmd_tm_capabilities_get,
+ .level_capabilities_get = pmd_tm_level_capabilities_get,
+ .node_capabilities_get = pmd_tm_node_capabilities_get,
+
+ .wred_profile_add = pmd_tm_wred_profile_add,
+ .wred_profile_delete = pmd_tm_wred_profile_delete,
+ .shared_wred_context_add_update = NULL,
+ .shared_wred_context_delete = NULL,
+
+ .shaper_profile_add = pmd_tm_shaper_profile_add,
+ .shaper_profile_delete = pmd_tm_shaper_profile_delete,
+ .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
+ .shared_shaper_delete = pmd_tm_shared_shaper_delete,
+
+ .node_add = pmd_tm_node_add,
+ .node_delete = pmd_tm_node_delete,
+ .node_suspend = NULL,
+ .node_resume = NULL,
+ .hierarchy_commit = pmd_tm_hierarchy_commit,
+
+ .node_parent_update = pmd_tm_node_parent_update,
+ .node_shaper_update = pmd_tm_node_shaper_update,
+ .node_shared_shaper_update = NULL,
+ .node_stats_update = NULL,
+ .node_wfq_weight_mode_update = NULL,
+ .node_cman_update = NULL,
+ .node_wred_context_update = NULL,
+ .node_shared_wred_context_update = NULL,
+
+ .node_stats_read = pmd_tm_node_stats_read,
+};
diff --git a/drivers/net/softnic/rte_pmd_eth_softnic_version.map b/drivers/net/softnic/rte_pmd_eth_softnic_version.map
new file mode 100644
index 00000000..fb2cb68c
--- /dev/null
+++ b/drivers/net/softnic/rte_pmd_eth_softnic_version.map
@@ -0,0 +1,7 @@
+DPDK_17.11 {
+ global:
+
+ rte_pmd_softnic_run;
+
+ local: *;
+};