author     C.J. Collier <cjcollier@linuxfoundation.org>  2016-06-14 07:50:17 -0700
committer  C.J. Collier <cjcollier@linuxfoundation.org>  2016-06-14 12:17:54 -0700
commit     97f17497d162afdb82c8704bf097f0fee3724b2e (patch)
tree       1c6269614c0c15ffef8451c58ae8f8b30a1bc804 /examples/qos_sched
parent     e04be89c2409570e0055b2cda60bd11395bb93b0 (diff)

Imported Upstream version 16.04

Change-Id: I77eadcd8538a9122e4773cbe55b24033dc451757
Signed-off-by: C.J. Collier <cjcollier@linuxfoundation.org>
Diffstat (limited to 'examples/qos_sched')
-rw-r--r--  examples/qos_sched/Makefile        |  60
-rw-r--r--  examples/qos_sched/app_thread.c    | 293
-rw-r--r--  examples/qos_sched/args.c          | 485
-rw-r--r--  examples/qos_sched/cfg_file.c      | 342
-rw-r--r--  examples/qos_sched/cfg_file.h      |  46
-rw-r--r--  examples/qos_sched/cmdline.c       | 643
-rw-r--r--  examples/qos_sched/init.c          | 370
-rw-r--r--  examples/qos_sched/main.c          | 254
-rw-r--r--  examples/qos_sched/main.h          | 195
-rw-r--r--  examples/qos_sched/profile.cfg     | 104
-rw-r--r--  examples/qos_sched/profile_ov.cfg  |  90
-rw-r--r--  examples/qos_sched/stats.c         | 315
12 files changed, 3197 insertions, 0 deletions
diff --git a/examples/qos_sched/Makefile b/examples/qos_sched/Makefile
new file mode 100644
index 00000000..f59645f5
--- /dev/null
+++ b/examples/qos_sched/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(info This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+else
+
+# binary name
+APP = qos_sched
+
+# all source are stored in SRCS-y
+SRCS-y := main.c args.c init.c app_thread.c cfg_file.c cmdline.c stats.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS_args.o := -D_GNU_SOURCE
+CFLAGS_cfg_file.o := -D_GNU_SOURCE
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+
+endif
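+
+# Example build, shown here for illustration only (the SDK path below is an
+# assumption, not part of the upstream sources):
+#
+#   export RTE_SDK=/path/to/dpdk
+#   export RTE_TARGET=x86_64-native-linuxapp-gcc    # default used above
+#   make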
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
new file mode 100644
index 00000000..3c678cc4
--- /dev/null
+++ b/examples/qos_sched/app_thread.c
@@ -0,0 +1,293 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+#include <rte_sched.h>
+
+#include "main.h"
+
+/*
+ * QoS parameters are encoded as follows:
+ * Outer VLAN ID defines subport
+ * Inner VLAN ID defines pipe
+ * Destination IP 0.0.XXX.0 defines traffic class
+ * Destination IP host (0.0.0.XXX) defines queue
+ * The values below define the offset of each field from the start of the frame
+ */
+#define SUBPORT_OFFSET 7
+#define PIPE_OFFSET 9
+#define TC_OFFSET 20
+#define QUEUE_OFFSET 20
+#define COLOR_OFFSET 19
+
+static inline int
+get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
+ uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
+{
+ uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
+
+ *subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
+ (port_params.n_subports_per_port - 1); /* Outer VLAN ID*/
+ *pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
+ (port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
+ *traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) &
+ (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */
+ *queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) &
+ (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1) ; /* Destination IP */
+ *color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */
+
+ return 0;
+}
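+
+/*
+ * Illustrative note added for clarity (not part of the upstream sources):
+ * the "& (n - 1)" masks above assume that the subport, pipe, traffic-class
+ * and queue counts are powers of two, so a raw field value is wrapped into
+ * the valid range without a modulo.  A minimal sketch of the same idea:
+ *
+ *	static inline uint32_t
+ *	wrap_pow2(uint32_t raw, uint32_t n)	// n must be a power of two
+ *	{
+ *		return raw & (n - 1);		// equivalent to raw % n
+ *	}
+ */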
+
+void
+app_rx_thread(struct thread_conf **confs)
+{
+ uint32_t i, nb_rx;
+ struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
+ struct thread_conf *conf;
+ int conf_idx = 0;
+
+ uint32_t subport;
+ uint32_t pipe;
+ uint32_t traffic_class;
+ uint32_t queue;
+ uint32_t color;
+
+ while ((conf = confs[conf_idx])) {
+ nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
+ burst_conf.rx_burst);
+
+ if (likely(nb_rx != 0)) {
+ APP_STATS_ADD(conf->stat.nb_rx, nb_rx);
+
+ for(i = 0; i < nb_rx; i++) {
+ get_pkt_sched(rx_mbufs[i],
+ &subport, &pipe, &traffic_class, &queue, &color);
+ rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
+ traffic_class, queue, (enum rte_meter_color) color);
+ }
+
+ if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
+ (void **)rx_mbufs, nb_rx) != 0)) {
+ for(i = 0; i < nb_rx; i++) {
+ rte_pktmbuf_free(rx_mbufs[i]);
+
+ APP_STATS_ADD(conf->stat.nb_drop, 1);
+ }
+ }
+ }
+ conf_idx++;
+ if (confs[conf_idx] == NULL)
+ conf_idx = 0;
+ }
+}
+
+
+
+/* Send the packets of a burst to an output interface.
+ * For performance reasons packets are never dropped here; whatever the NIC
+ * does not accept is simply retransmitted until the whole burst is sent.
+ */
+
+static inline void
+app_send_burst(struct thread_conf *qconf)
+{
+ struct rte_mbuf **mbufs;
+ uint32_t n, ret;
+
+ mbufs = (struct rte_mbuf **)qconf->m_table;
+ n = qconf->n_mbufs;
+
+ do {
+ ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs, (uint16_t)n);
+ /* we cannot drop the packets, so re-send */
+ /* update number of packets to be sent */
+ n -= ret;
+ mbufs = (struct rte_mbuf **)&mbufs[ret];
+ } while (n);
+}
+
+
+/* Send the packet to an output interface */
+static void
+app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs, uint32_t nb_pkt)
+{
+ uint32_t i, len;
+
+ len = qconf->n_mbufs;
+ for(i = 0; i < nb_pkt; i++) {
+ qconf->m_table[len] = mbufs[i];
+ len++;
+ /* enough pkts to be sent */
+ if (unlikely(len == burst_conf.tx_burst)) {
+ qconf->n_mbufs = len;
+ app_send_burst(qconf);
+ len = 0;
+ }
+ }
+
+ qconf->n_mbufs = len;
+}
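+
+/*
+ * For illustration (descriptive comment, not upstream): with
+ * burst_conf.tx_burst set to 64, the function above only buffers mbufs in
+ * qconf->m_table; rte_eth_tx_burst() is reached via app_send_burst() once
+ * 64 packets have accumulated, or when the caller's drain timeout flushes
+ * whatever is left.
+ */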
+
+void
+app_tx_thread(struct thread_conf **confs)
+{
+ struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
+ struct thread_conf *conf;
+ int conf_idx = 0;
+ int retval;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ while ((conf = confs[conf_idx])) {
+ retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
+ burst_conf.qos_dequeue);
+ if (likely(retval == 0)) {
+ app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
+
+ conf->counter = 0; /* reset empty read loop counter */
+ }
+
+ conf->counter++;
+
+ /* drain ring and TX queues */
+ if (unlikely(conf->counter > drain_tsc)) {
+ /* now check if there are any packets left to be transmitted */
+ if (conf->n_mbufs != 0) {
+ app_send_burst(conf);
+
+ conf->n_mbufs = 0;
+ }
+ conf->counter = 0;
+ }
+
+ conf_idx++;
+ if (confs[conf_idx] == NULL)
+ conf_idx = 0;
+ }
+}
+
+
+void
+app_worker_thread(struct thread_conf **confs)
+{
+ struct rte_mbuf *mbufs[burst_conf.ring_burst];
+ struct thread_conf *conf;
+ int conf_idx = 0;
+
+ while ((conf = confs[conf_idx])) {
+ uint32_t nb_pkt;
+ int retval;
+
+ /* Read packet from the ring */
+ retval = rte_ring_sc_dequeue_bulk(conf->rx_ring, (void **)mbufs,
+ burst_conf.ring_burst);
+ if (likely(retval == 0)) {
+ int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
+ burst_conf.ring_burst);
+
+ APP_STATS_ADD(conf->stat.nb_drop, burst_conf.ring_burst - nb_sent);
+ APP_STATS_ADD(conf->stat.nb_rx, burst_conf.ring_burst);
+ }
+
+ nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
+ burst_conf.qos_dequeue);
+ if (likely(nb_pkt > 0))
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+
+ conf_idx++;
+ if (confs[conf_idx] == NULL)
+ conf_idx = 0;
+ }
+}
+
+
+void
+app_mixed_thread(struct thread_conf **confs)
+{
+ struct rte_mbuf *mbufs[burst_conf.ring_burst];
+ struct thread_conf *conf;
+ int conf_idx = 0;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ while ((conf = confs[conf_idx])) {
+ uint32_t nb_pkt;
+ int retval;
+
+ /* Read packet from the ring */
+ retval = rte_ring_sc_dequeue_bulk(conf->rx_ring, (void **)mbufs,
+ burst_conf.ring_burst);
+ if (likely(retval == 0)) {
+ int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
+ burst_conf.ring_burst);
+
+ APP_STATS_ADD(conf->stat.nb_drop, burst_conf.ring_burst - nb_sent);
+ APP_STATS_ADD(conf->stat.nb_rx, burst_conf.ring_burst);
+ }
+
+
+ nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
+ burst_conf.qos_dequeue);
+ if (likely(nb_pkt > 0)) {
+ app_send_packets(conf, mbufs, nb_pkt);
+
+ conf->counter = 0; /* reset empty read loop counter */
+ }
+
+ conf->counter++;
+
+ /* drain ring and TX queues */
+ if (unlikely(conf->counter > drain_tsc)) {
+
+ /* now check if there are any packets left to be transmitted */
+ if (conf->n_mbufs != 0) {
+ app_send_burst(conf);
+
+ conf->n_mbufs = 0;
+ }
+ conf->counter = 0;
+ }
+
+ conf_idx++;
+ if (confs[conf_idx] == NULL)
+ conf_idx = 0;
+ }
+}
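+
+/*
+ * Summary of the per-lcore pipeline implemented in this file (descriptive
+ * comment added for clarity, not present upstream):
+ *
+ *   app_rx_thread      NIC RX  -> classify (get_pkt_sched) -> rx_ring
+ *   app_worker_thread  rx_ring -> QoS enqueue/dequeue      -> tx_ring
+ *   app_tx_thread      tx_ring -> buffered NIC TX
+ *   app_mixed_thread   rx_ring -> QoS enqueue/dequeue -> buffered NIC TX,
+ *                      covering the case where the worker and TX roles share
+ *                      an lcore (cf. the tx_core default in args.c).
+ */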
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
new file mode 100644
index 00000000..3e7fd087
--- /dev/null
+++ b/examples/qos_sched/args.c
@@ -0,0 +1,485 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <locale.h>
+#include <unistd.h>
+#include <limits.h>
+#include <getopt.h>
+
+#include <rte_log.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_string_fns.h>
+
+#include "main.h"
+
+#define APP_NAME "qos_sched"
+#define MAX_OPT_VALUES 8
+#define SYS_CPU_DIR "/sys/devices/system/cpu/cpu%u/topology/"
+
+static uint32_t app_master_core = 1;
+static uint32_t app_numa_mask;
+static uint64_t app_used_core_mask = 0;
+static uint64_t app_used_port_mask = 0;
+static uint64_t app_used_rx_port_mask = 0;
+static uint64_t app_used_tx_port_mask = 0;
+
+
+static const char usage[] =
+ " \n"
+ " %s <APP PARAMS> \n"
+ " \n"
+ "Application mandatory parameters: \n"
+ " --pfc \"RX PORT, TX PORT, RX LCORE, WT LCORE\" : Packet flow configuration \n"
+ " multiple pfc can be configured in command line \n"
+ " \n"
+ "Application optional parameters: \n"
+ " --i : run in interactive mode (default value is %u) \n"
+ " --mst I : master core index (default value is %u) \n"
+ " --rsz \"A, B, C\" : Ring sizes \n"
+ " A = Size (in number of buffer descriptors) of each of the NIC RX \n"
+ " rings read by the I/O RX lcores (default value is %u) \n"
+ " B = Size (in number of elements) of each of the SW rings used by the\n"
+ " I/O RX lcores to send packets to worker lcores (default value is\n"
+ " %u) \n"
+ " C = Size (in number of buffer descriptors) of each of the NIC TX \n"
+ " rings written by worker lcores (default value is %u) \n"
+ " --bsz \"A, B, C, D\": Burst sizes \n"
+ " A = I/O RX lcore read burst size from NIC RX (default value is %u) \n"
+ " B = I/O RX lcore write burst size to output SW rings, \n"
+ " Worker lcore read burst size from input SW rings, \n"
+ " QoS enqueue size (default value is %u) \n"
+ " C = QoS dequeue size (default value is %u) \n"
+ " D = Worker lcore write burst size to NIC TX (default value is %u) \n"
+ " --msz M : Mempool size (in number of mbufs) for each pfc (default %u) \n"
+ " --rth \"A, B, C\" : RX queue threshold parameters \n"
+ " A = RX prefetch threshold (default value is %u) \n"
+ " B = RX host threshold (default value is %u) \n"
+ " C = RX write-back threshold (default value is %u) \n"
+ " --tth \"A, B, C\" : TX queue threshold parameters \n"
+ " A = TX prefetch threshold (default value is %u) \n"
+ " B = TX host threshold (default value is %u) \n"
+ " C = TX write-back threshold (default value is %u) \n"
+ " --cfg FILE : profile configuration to load \n"
+;
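+
+/*
+ * Example invocation, for illustration only (the EAL arguments and lcore
+ * numbers are assumptions, not values mandated by the application):
+ *
+ *   ./qos_sched -c 0xe -n 4 -- --pfc "0,1,3,2" --cfg profile.cfg
+ *
+ * i.e. one packet flow with RX port 0, TX port 1, RX lcore 3 and worker
+ * lcore 2, with lcore 1 (the --mst default) acting as master core.
+ */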
+
+/* display usage */
+static void
+app_usage(const char *prgname)
+{
+ printf(usage, prgname, APP_INTERACTIVE_DEFAULT, app_master_core,
+ APP_RX_DESC_DEFAULT, APP_RING_SIZE, APP_TX_DESC_DEFAULT,
+ MAX_PKT_RX_BURST, PKT_ENQUEUE, PKT_DEQUEUE,
+ MAX_PKT_TX_BURST, NB_MBUF,
+ RX_PTHRESH, RX_HTHRESH, RX_WTHRESH,
+ TX_PTHRESH, TX_HTHRESH, TX_WTHRESH
+ );
+}
+
+static inline int str_is(const char *str, const char *is)
+{
+ return strcmp(str, is) == 0;
+}
+
+/* returns core mask used by DPDK */
+static uint64_t
+app_eal_core_mask(void)
+{
+ uint32_t i;
+ uint64_t cm = 0;
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (cfg->lcore_role[i] == ROLE_RTE)
+ cm |= (1ULL << i);
+ }
+
+ cm |= (1ULL << cfg->master_lcore);
+
+ return cm;
+}
+
+
+/* returns the total number of cores present in the system */
+static uint32_t
+app_cpu_core_count(void)
+{
+ int i, len;
+ char path[PATH_MAX];
+ uint32_t ncores = 0;
+
+ for(i = 0; i < RTE_MAX_LCORE; i++) {
+ len = snprintf(path, sizeof(path), SYS_CPU_DIR, i);
+ if (len <= 0 || (unsigned)len >= sizeof(path))
+ continue;
+
+ if (access(path, F_OK) == 0)
+ ncores++;
+ }
+
+ return ncores;
+}
+
+/* returns:
+ number of values parsed
+ -1 in case of error
+*/
+static int
+app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32_t *opt_vals)
+{
+ char *string;
+ uint32_t i, n_tokens;
+ char *tokens[MAX_OPT_VALUES];
+
+ if (conf_str == NULL || opt_vals == NULL || n_vals == 0 || n_vals > MAX_OPT_VALUES)
+ return -1;
+
+ /* duplicate configuration string before splitting it to tokens */
+ string = strdup(conf_str);
+ if (string == NULL)
+ return -1;
+
+ n_tokens = rte_strsplit(string, strnlen(string, 32), tokens, n_vals, separator);
+
+ for(i = 0; i < n_tokens; i++) {
+ opt_vals[i] = (uint32_t)atol(tokens[i]);
+ }
+
+ free(string);
+
+ return n_tokens;
+}
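+
+/*
+ * For illustration: app_parse_opt_vals("64,8192,64", ',', 3, vals) splits
+ * the string into at most three comma-separated tokens, stores
+ * vals[] = {64, 8192, 64} and returns 3 (the number of tokens parsed);
+ * callers treat any other return value as an error.
+ */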
+
+static int
+app_parse_ring_conf(const char *conf_str)
+{
+ int ret;
+ uint32_t vals[3];
+
+ ret = app_parse_opt_vals(conf_str, ',', 3, vals);
+ if (ret != 3)
+ return ret;
+
+ ring_conf.rx_size = vals[0];
+ ring_conf.ring_size = vals[1];
+ ring_conf.tx_size = vals[2];
+
+ return 0;
+}
+
+static int
+app_parse_rth_conf(const char *conf_str)
+{
+ int ret;
+ uint32_t vals[3];
+
+ ret = app_parse_opt_vals(conf_str, ',', 3, vals);
+ if (ret != 3)
+ return ret;
+
+ rx_thresh.pthresh = (uint8_t)vals[0];
+ rx_thresh.hthresh = (uint8_t)vals[1];
+ rx_thresh.wthresh = (uint8_t)vals[2];
+
+ return 0;
+}
+
+static int
+app_parse_tth_conf(const char *conf_str)
+{
+ int ret;
+ uint32_t vals[3];
+
+ ret = app_parse_opt_vals(conf_str, ',', 3, vals);
+ if (ret != 3)
+ return ret;
+
+ tx_thresh.pthresh = (uint8_t)vals[0];
+ tx_thresh.hthresh = (uint8_t)vals[1];
+ tx_thresh.wthresh = (uint8_t)vals[2];
+
+ return 0;
+}
+
+static int
+app_parse_flow_conf(const char *conf_str)
+{
+ int ret;
+ uint32_t vals[5];
+ struct flow_conf *pconf;
+ uint64_t mask;
+
+ ret = app_parse_opt_vals(conf_str, ',', 6, vals);
+ if (ret < 4 || ret > 5)
+ return ret;
+
+ pconf = &qos_conf[nb_pfc];
+
+ pconf->rx_port = (uint8_t)vals[0];
+ pconf->tx_port = (uint8_t)vals[1];
+ pconf->rx_core = (uint8_t)vals[2];
+ pconf->wt_core = (uint8_t)vals[3];
+ if (ret == 5)
+ pconf->tx_core = (uint8_t)vals[4];
+ else
+ pconf->tx_core = pconf->wt_core;
+
+ if (pconf->rx_core == pconf->wt_core) {
+ RTE_LOG(ERR, APP, "pfc %u: rx thread and worker thread cannot share same core\n", nb_pfc);
+ return -1;
+ }
+
+ if (pconf->rx_port >= RTE_MAX_ETHPORTS) {
+ RTE_LOG(ERR, APP, "pfc %u: invalid rx port %"PRIu8" index\n",
+ nb_pfc, pconf->rx_port);
+ return -1;
+ }
+ if (pconf->tx_port >= RTE_MAX_ETHPORTS) {
+ RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu8" index\n",
+ nb_pfc, pconf->rx_port);
+ return -1;
+ }
+
+ mask = 1lu << pconf->rx_port;
+ if (app_used_rx_port_mask & mask) {
+ RTE_LOG(ERR, APP, "pfc %u: rx port %"PRIu8" is used already\n",
+ nb_pfc, pconf->rx_port);
+ return -1;
+ }
+ app_used_rx_port_mask |= mask;
+ app_used_port_mask |= mask;
+
+ mask = 1lu << pconf->tx_port;
+ if (app_used_tx_port_mask & mask) {
+ RTE_LOG(ERR, APP, "pfc %u: port %"PRIu8" is used already\n",
+ nb_pfc, pconf->tx_port);
+ return -1;
+ }
+ app_used_tx_port_mask |= mask;
+ app_used_port_mask |= mask;
+
+ mask = 1lu << pconf->rx_core;
+ app_used_core_mask |= mask;
+
+ mask = 1lu << pconf->wt_core;
+ app_used_core_mask |= mask;
+
+ mask = 1lu << pconf->tx_core;
+ app_used_core_mask |= mask;
+
+ nb_pfc++;
+
+ return 0;
+}
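+
+/*
+ * For illustration: --pfc "0,1,3,2" is parsed as RX port 0, TX port 1,
+ * RX lcore 3 and worker lcore 2; with only four values the TX lcore
+ * defaults to the worker lcore.  A fifth value, e.g. "0,1,3,2,4", would
+ * assign a dedicated TX lcore instead.
+ */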
+
+static int
+app_parse_burst_conf(const char *conf_str)
+{
+ int ret;
+ uint32_t vals[4];
+
+ ret = app_parse_opt_vals(conf_str, ',', 4, vals);
+ if (ret != 4)
+ return ret;
+
+ burst_conf.rx_burst = (uint16_t)vals[0];
+ burst_conf.ring_burst = (uint16_t)vals[1];
+ burst_conf.qos_dequeue = (uint16_t)vals[2];
+ burst_conf.tx_burst = (uint16_t)vals[3];
+
+ return 0;
+}
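+
+/*
+ * For illustration: --bsz "64,64,32,64" sets the NIC RX read burst to 64,
+ * the software-ring/QoS enqueue burst to 64, the QoS dequeue burst to 32
+ * and the NIC TX write burst to 64, matching the A/B/C/D fields described
+ * in the usage text above.
+ */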
+
+/*
+ * Parses the argument given in the command line of the application,
+ * calculates mask for used cores and initializes EAL with calculated core mask
+ */
+int
+app_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ int option_index;
+ const char *optname;
+ char *prgname = argv[0];
+ uint32_t i, nb_lcores;
+
+ static struct option lgopts[] = {
+ { "pfc", 1, 0, 0 },
+ { "mst", 1, 0, 0 },
+ { "rsz", 1, 0, 0 },
+ { "bsz", 1, 0, 0 },
+ { "msz", 1, 0, 0 },
+ { "rth", 1, 0, 0 },
+ { "tth", 1, 0, 0 },
+ { "cfg", 1, 0, 0 },
+ { NULL, 0, 0, 0 }
+ };
+
+ /* initialize EAL first */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ return -1;
+
+ argc -= ret;
+ argv += ret;
+
+ /* set en_US locale to print big numbers with ',' */
+ setlocale(LC_NUMERIC, "en_US.utf-8");
+
+ while ((opt = getopt_long(argc, argv, "i",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ case 'i':
+ printf("Interactive-mode selected\n");
+ interactive = 1;
+ break;
+ /* long options */
+ case 0:
+ optname = lgopts[option_index].name;
+ if (str_is(optname, "pfc")) {
+ ret = app_parse_flow_conf(optarg);
+ if (ret) {
+ RTE_LOG(ERR, APP, "Invalid pipe configuration %s\n", optarg);
+ return -1;
+ }
+ break;
+ }
+ if (str_is(optname, "mst")) {
+ app_master_core = (uint32_t)atoi(optarg);
+ break;
+ }
+ if (str_is(optname, "rsz")) {
+ ret = app_parse_ring_conf(optarg);
+ if (ret) {
+ RTE_LOG(ERR, APP, "Invalid ring configuration %s\n", optarg);
+ return -1;
+ }
+ break;
+ }
+ if (str_is(optname, "bsz")) {
+ ret = app_parse_burst_conf(optarg);
+ if (ret) {
+ RTE_LOG(ERR, APP, "Invalid burst configuration %s\n", optarg);
+ return -1;
+ }
+ break;
+ }
+ if (str_is(optname, "msz")) {
+ mp_size = atoi(optarg);
+ if (mp_size <= 0) {
+ RTE_LOG(ERR, APP, "Invalid mempool size %s\n", optarg);
+ return -1;
+ }
+ break;
+ }
+ if (str_is(optname, "rth")) {
+ ret = app_parse_rth_conf(optarg);
+ if (ret) {
+ RTE_LOG(ERR, APP, "Invalid RX threshold configuration %s\n", optarg);
+ return -1;
+ }
+ break;
+ }
+ if (str_is(optname, "tth")) {
+ ret = app_parse_tth_conf(optarg);
+ if (ret) {
+ RTE_LOG(ERR, APP, "Invalid TX threshold configuration %s\n", optarg);
+ return -1;
+ }
+ break;
+ }
+ if (str_is(optname, "cfg")) {
+ cfg_profile = optarg;
+ break;
+ }
+ break;
+
+ default:
+ app_usage(prgname);
+ return -1;
+ }
+ }
+
+ /* check master core index validity */
+ if (app_used_core_mask & (1u << app_master_core)) {
+  RTE_LOG(ERR, APP, "Master core index is not configured properly\n");
+  app_usage(prgname);
+  return -1;
+ }
+ app_used_core_mask |= 1u << app_master_core;
+
+ if ((app_used_core_mask != app_eal_core_mask()) ||
+ (app_master_core != rte_get_master_lcore())) {
+ RTE_LOG(ERR, APP, "EAL core mask not configured properly, must be %" PRIx64
+ " instead of %" PRIx64 "\n" , app_used_core_mask, app_eal_core_mask());
+ return -1;
+ }
+
+ if (nb_pfc == 0) {
+ RTE_LOG(ERR, APP, "Packet flow not configured!\n");
+ app_usage(prgname);
+ return -1;
+ }
+
+ /* sanity check for cores assignment */
+ nb_lcores = app_cpu_core_count();
+
+ for(i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].rx_core >= nb_lcores) {
+ RTE_LOG(ERR, APP, "pfc %u: invalid RX lcore index %u\n", i + 1,
+ qos_conf[i].rx_core);
+ return -1;
+ }
+ if (qos_conf[i].wt_core >= nb_lcores) {
+ RTE_LOG(ERR, APP, "pfc %u: invalid WT lcore index %u\n", i + 1,
+ qos_conf[i].wt_core);
+ return -1;
+ }
+ uint32_t rx_sock = rte_lcore_to_socket_id(qos_conf[i].rx_core);
+ uint32_t wt_sock = rte_lcore_to_socket_id(qos_conf[i].wt_core);
+ if (rx_sock != wt_sock) {
+ RTE_LOG(ERR, APP, "pfc %u: RX and WT must be on the same socket\n", i + 1);
+ return -1;
+ }
+ app_numa_mask |= 1 << rte_lcore_to_socket_id(qos_conf[i].rx_core);
+ }
+
+ return 0;
+}
diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
new file mode 100644
index 00000000..94a1a221
--- /dev/null
+++ b/examples/qos_sched/cfg_file.c
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <rte_string_fns.h>
+#include <rte_sched.h>
+
+#include "cfg_file.h"
+#include "main.h"
+
+
+/** when we resize a file structure, how many extra entries
+ * for new sections do we add in */
+#define CFG_ALLOC_SECTION_BATCH 8
+/** when we resize a section structure, how many extra entries
+ * for new entries do we add in */
+#define CFG_ALLOC_ENTRY_BATCH 16
+
+int
+cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params)
+{
+ const char *entry;
+ int j;
+
+ if (!cfg || !port_params)
+ return -1;
+
+ entry = rte_cfgfile_get_entry(cfg, "port", "frame overhead");
+ if (entry)
+ port_params->frame_overhead = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, "port", "number of subports per port");
+ if (entry)
+ port_params->n_subports_per_port = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, "port", "number of pipes per subport");
+ if (entry)
+ port_params->n_pipes_per_subport = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, "port", "queue sizes");
+ if (entry) {
+ char *next;
+
+ for(j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ port_params->qsize[j] = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+#ifdef RTE_SCHED_RED
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ char str[32];
+
+ /* Parse WRED min thresholds */
+ snprintf(str, sizeof(str), "tc %d wred min", j);
+ entry = rte_cfgfile_get_entry(cfg, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].min_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED max thresholds */
+ snprintf(str, sizeof(str), "tc %d wred max", j);
+ entry = rte_cfgfile_get_entry(cfg, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].max_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED inverse mark probabilities */
+ snprintf(str, sizeof(str), "tc %d wred inv prob", j);
+ entry = rte_cfgfile_get_entry(cfg, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].maxp_inv
+ = (uint8_t)strtol(entry, &next, 10);
+
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED EWMA filter weights */
+ snprintf(str, sizeof(str), "tc %d wred weight", j);
+ entry = rte_cfgfile_get_entry(cfg, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].wq_log2
+ = (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ }
+#endif /* RTE_SCHED_RED */
+
+ return 0;
+}
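+
+/*
+ * Illustrative [port] section consumed by cfg_load_port() above; the keys
+ * match the lookups in the code, the numeric values are placeholders only:
+ *
+ *   [port]
+ *   frame overhead = 24
+ *   number of subports per port = 1
+ *   number of pipes per subport = 4096
+ *   queue sizes = 64 64 64 64
+ */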
+
+int
+cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe_params)
+{
+ int i, j;
+ char *next;
+ const char *entry;
+ int profiles;
+
+ if (!cfg || !pipe_params)
+ return -1;
+
+ profiles = rte_cfgfile_num_sections(cfg, "pipe profile", sizeof("pipe profile") - 1);
+ port_params.n_pipe_profiles = profiles;
+
+ for (j = 0; j < profiles; j++) {
+ char pipe_name[32];
+ snprintf(pipe_name, sizeof(pipe_name), "pipe profile %d", j);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tb rate");
+ if (entry)
+ pipe_params[j].tb_rate = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tb size");
+ if (entry)
+ pipe_params[j].tb_size = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc period");
+ if (entry)
+ pipe_params[j].tc_period = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 0 rate");
+ if (entry)
+ pipe_params[j].tc_rate[0] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 1 rate");
+ if (entry)
+ pipe_params[j].tc_rate[1] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 2 rate");
+ if (entry)
+ pipe_params[j].tc_rate[2] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 rate");
+ if (entry)
+ pipe_params[j].tc_rate[3] = (uint32_t)atoi(entry);
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 oversubscription weight");
+ if (entry)
+ pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry);
+#endif
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 0 wrr weights");
+ if (entry) {
+ for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] =
+ (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 1 wrr weights");
+ if (entry) {
+ for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] =
+ (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 2 wrr weights");
+ if (entry) {
+ for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] =
+ (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 wrr weights");
+ if (entry) {
+ for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] =
+ (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ }
+ return 0;
+}
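+
+/*
+ * Illustrative "pipe profile" section matching the keys read above (values
+ * are placeholders, not tuning advice):
+ *
+ *   [pipe profile 0]
+ *   tb rate = 305175
+ *   tb size = 1000000
+ *   tc period = 40
+ *   tc 0 rate = 305175
+ *   tc 1 rate = 305175
+ *   tc 2 rate = 305175
+ *   tc 3 rate = 305175
+ *   tc 0 wrr weights = 1 1 1 1
+ *   tc 1 wrr weights = 1 1 1 1
+ *   tc 2 wrr weights = 1 1 1 1
+ *   tc 3 wrr weights = 1 1 1 1
+ */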
+
+int
+cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subport_params)
+{
+ const char *entry;
+ int i, j, k;
+
+ if (!cfg || !subport_params)
+ return -1;
+
+ memset(app_pipe_to_profile, -1, sizeof(app_pipe_to_profile));
+
+ for (i = 0; i < MAX_SCHED_SUBPORTS; i++) {
+ char sec_name[CFG_NAME_LEN];
+ snprintf(sec_name, sizeof(sec_name), "subport %d", i);
+
+ if (rte_cfgfile_has_section(cfg, sec_name)) {
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tb rate");
+ if (entry)
+ subport_params[i].tb_rate = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tb size");
+ if (entry)
+ subport_params[i].tb_size = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc period");
+ if (entry)
+ subport_params[i].tc_period = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 0 rate");
+ if (entry)
+ subport_params[i].tc_rate[0] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 1 rate");
+ if (entry)
+ subport_params[i].tc_rate[1] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 2 rate");
+ if (entry)
+ subport_params[i].tc_rate[2] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 3 rate");
+ if (entry)
+ subport_params[i].tc_rate[3] = (uint32_t)atoi(entry);
+
+ int n_entries = rte_cfgfile_section_num_entries(cfg, sec_name);
+ struct rte_cfgfile_entry entries[n_entries];
+
+ rte_cfgfile_section_entries(cfg, sec_name, entries, n_entries);
+
+ for (j = 0; j < n_entries; j++) {
+ if (strncmp("pipe", entries[j].name, sizeof("pipe") - 1) == 0) {
+ int profile;
+ char *tokens[2] = {NULL, NULL};
+ int n_tokens;
+ int begin, end;
+
+ profile = atoi(entries[j].value);
+ n_tokens = rte_strsplit(&entries[j].name[sizeof("pipe")],
+ strnlen(entries[j].name, CFG_NAME_LEN), tokens, 2, '-');
+
+ begin = atoi(tokens[0]);
+ if (n_tokens == 2)
+ end = atoi(tokens[1]);
+ else
+ end = begin;
+
+ if (end >= MAX_SCHED_PIPES || begin > end)
+ return -1;
+
+ for (k = begin; k <= end; k++) {
+ char profile_name[CFG_NAME_LEN];
+
+ snprintf(profile_name, sizeof(profile_name),
+ "pipe profile %d", profile);
+ if (rte_cfgfile_has_section(cfg, profile_name))
+ app_pipe_to_profile[i][k] = profile;
+ else
+ rte_exit(EXIT_FAILURE, "Wrong pipe profile %s\n",
+ entries[j].value);
+
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
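+
+/*
+ * Illustrative "subport" section for the parser above (values are
+ * placeholders).  A "pipe X-Y = P" entry maps pipes X through Y of the
+ * subport to pipe profile P:
+ *
+ *   [subport 0]
+ *   tb rate = 1250000000
+ *   tb size = 1000000
+ *   tc period = 10
+ *   tc 0 rate = 1250000000
+ *   tc 1 rate = 1250000000
+ *   tc 2 rate = 1250000000
+ *   tc 3 rate = 1250000000
+ *   pipe 0-4095 = 0
+ */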
diff --git a/examples/qos_sched/cfg_file.h b/examples/qos_sched/cfg_file.h
new file mode 100644
index 00000000..cc5a2cd5
--- /dev/null
+++ b/examples/qos_sched/cfg_file.h
@@ -0,0 +1,46 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CFG_FILE_H__
+#define __CFG_FILE_H__
+
+#include <rte_sched.h>
+#include <rte_cfgfile.h>
+
+int cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port);
+
+int cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe);
+
+int cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subport);
+
+#endif
diff --git a/examples/qos_sched/cmdline.c b/examples/qos_sched/cmdline.c
new file mode 100644
index 00000000..f79d5246
--- /dev/null
+++ b/examples/qos_sched/cmdline.c
@@ -0,0 +1,643 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <termios.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "main.h"
+
+/* *** Help command with introduction. *** */
+struct cmd_help_result {
+ cmdline_fixed_string_t help;
+};
+
+static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ cmdline_printf(
+ cl,
+ "\n"
+ "The following commands are currently available:\n\n"
+ "Control:\n"
+ " quit : Quit the application.\n"
+ "\nStatistics:\n"
+ " stats app : Show app statistics.\n"
+ " stats port X subport Y : Show stats of a specific subport.\n"
+ " stats port X subport Y pipe Z : Show stats of a specific pipe.\n"
+ "\nAverage queue size:\n"
+ " qavg port X subport Y : Show average queue size per subport.\n"
+ " qavg port X subport Y tc Z : Show average queue size per subport and TC.\n"
+ " qavg port X subport Y pipe Z : Show average queue size per pipe.\n"
+ " qavg port X subport Y pipe Z tc A : Show average queue size per pipe and TC.\n"
+ " qavg port X subport Y pipe Z tc A q B : Show average queue size of a specific queue.\n"
+ " qavg [n|period] X : Set number of times and peiod (us).\n\n"
+ );
+
+}
+
+cmdline_parse_token_string_t cmd_help_help =
+ TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
+
+cmdline_parse_inst_t cmd_help = {
+ .f = cmd_help_parsed,
+ .data = NULL,
+ .help_str = "show help",
+ .tokens = {
+ (void *)&cmd_help_help,
+ NULL,
+ },
+};
+
+/* *** QUIT *** */
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ cmdline_quit(cl);
+}
+
+cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed,
+ .data = NULL,
+ .help_str = "exit application",
+ .tokens = {
+ (void *)&cmd_quit_quit,
+ NULL,
+ },
+};
+
+/* *** SET QAVG PARAMETERS *** */
+struct cmd_setqavg_result {
+ cmdline_fixed_string_t qavg_string;
+ cmdline_fixed_string_t param_string;
+ uint32_t number;
+};
+
+static void cmd_setqavg_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_setqavg_result *res = parsed_result;
+
+ if (!strcmp(res->param_string, "period"))
+ qavg_period = res->number;
+ else if (!strcmp(res->param_string, "n"))
+ qavg_ntimes = res->number;
+ else
+ printf("\nUnknown parameter.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_setqavg_qavg_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_setqavg_result, qavg_string,
+ "qavg");
+cmdline_parse_token_string_t cmd_setqavg_param_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_setqavg_result, param_string,
+ "period#n");
+cmdline_parse_token_num_t cmd_setqavg_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_setqavg_result, number,
+ UINT32);
+
+cmdline_parse_inst_t cmd_setqavg = {
+ .f = cmd_setqavg_parsed,
+ .data = NULL,
+ .help_str = "Show subport stats.",
+ .tokens = {
+ (void *)&cmd_setqavg_qavg_string,
+ (void *)&cmd_setqavg_param_string,
+ (void *)&cmd_setqavg_number,
+ NULL,
+ },
+};
+
+/* *** SHOW APP STATS *** */
+struct cmd_appstats_result {
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t app_string;
+};
+
+static void cmd_appstats_parsed(__attribute__((unused)) void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ app_stat();
+}
+
+cmdline_parse_token_string_t cmd_appstats_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_appstats_result, stats_string,
+ "stats");
+cmdline_parse_token_string_t cmd_appstats_app_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_appstats_result, app_string,
+ "app");
+
+cmdline_parse_inst_t cmd_appstats = {
+ .f = cmd_appstats_parsed,
+ .data = NULL,
+ .help_str = "Show app stats.",
+ .tokens = {
+ (void *)&cmd_appstats_stats_string,
+ (void *)&cmd_appstats_app_string,
+ NULL,
+ },
+};
+
+/* *** SHOW SUBPORT STATS *** */
+struct cmd_subportstats_result {
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ uint8_t port_number;
+ cmdline_fixed_string_t subport_string;
+ uint32_t subport_number;
+};
+
+static void cmd_subportstats_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_subportstats_result *res = parsed_result;
+
+ if (subport_stat(res->port_number, res->subport_number) < 0)
+ printf ("\nStats not available for these parameters. Check that both the port and subport are correct.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_subportstats_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_subportstats_result, stats_string,
+ "stats");
+cmdline_parse_token_string_t cmd_subportstats_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_subportstats_result, port_string,
+ "port");
+cmdline_parse_token_string_t cmd_subportstats_subport_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_subportstats_result, subport_string,
+ "subport");
+cmdline_parse_token_num_t cmd_subportstats_subport_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_subportstats_result, subport_number,
+ UINT32);
+cmdline_parse_token_num_t cmd_subportstats_port_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_subportstats_result, port_number,
+ UINT8);
+
+cmdline_parse_inst_t cmd_subportstats = {
+ .f = cmd_subportstats_parsed,
+ .data = NULL,
+ .help_str = "Show subport stats.",
+ .tokens = {
+ (void *)&cmd_subportstats_stats_string,
+ (void *)&cmd_subportstats_port_string,
+ (void *)&cmd_subportstats_port_number,
+ (void *)&cmd_subportstats_subport_string,
+ (void *)&cmd_subportstats_subport_number,
+ NULL,
+ },
+};
+
+/* *** SHOW PIPE STATS *** */
+struct cmd_pipestats_result {
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ uint8_t port_number;
+ cmdline_fixed_string_t subport_string;
+ uint32_t subport_number;
+ cmdline_fixed_string_t pipe_string;
+ uint32_t pipe_number;
+};
+
+static void cmd_pipestats_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_pipestats_result *res = parsed_result;
+
+ if (pipe_stat(res->port_number, res->subport_number, res->pipe_number) < 0)
+ printf ("\nStats not available for these parameters. Check that both the port and subport are correct.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_pipestats_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipestats_result, stats_string,
+ "stats");
+cmdline_parse_token_string_t cmd_pipestats_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipestats_result, port_string,
+ "port");
+cmdline_parse_token_num_t cmd_pipestats_port_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipestats_result, port_number,
+ UINT8);
+cmdline_parse_token_string_t cmd_pipestats_subport_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipestats_result, subport_string,
+ "subport");
+cmdline_parse_token_num_t cmd_pipestats_subport_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipestats_result, subport_number,
+ UINT32);
+cmdline_parse_token_string_t cmd_pipestats_pipe_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipestats_result, pipe_string,
+ "pipe");
+cmdline_parse_token_num_t cmd_pipestats_pipe_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipestats_result, pipe_number,
+ UINT32);
+
+cmdline_parse_inst_t cmd_pipestats = {
+ .f = cmd_pipestats_parsed,
+ .data = NULL,
+ .help_str = "Show pipe stats.",
+ .tokens = {
+ (void *)&cmd_pipestats_stats_string,
+ (void *)&cmd_pipestats_port_string,
+ (void *)&cmd_pipestats_port_number,
+ (void *)&cmd_pipestats_subport_string,
+ (void *)&cmd_pipestats_subport_number,
+ (void *)&cmd_pipestats_pipe_string,
+ (void *)&cmd_pipestats_pipe_number,
+ NULL,
+ },
+};
+
+/* *** SHOW AVERAGE QUEUE SIZE (QUEUE) *** */
+struct cmd_avg_q_result {
+ cmdline_fixed_string_t qavg_string;
+ cmdline_fixed_string_t port_string;
+ uint8_t port_number;
+ cmdline_fixed_string_t subport_string;
+ uint32_t subport_number;
+ cmdline_fixed_string_t pipe_string;
+ uint32_t pipe_number;
+ cmdline_fixed_string_t tc_string;
+ uint8_t tc_number;
+ cmdline_fixed_string_t q_string;
+ uint8_t q_number;
+};
+
+static void cmd_avg_q_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_avg_q_result *res = parsed_result;
+
+ if (qavg_q(res->port_number, res->subport_number, res->pipe_number, res->tc_number, res->q_number) < 0)
+ printf ("\nStats not available for these parameters. Check that both the port and subport are correct.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_avg_q_qavg_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, qavg_string,
+ "qavg");
+cmdline_parse_token_string_t cmd_avg_q_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, port_string,
+ "port");
+cmdline_parse_token_num_t cmd_avg_q_port_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, port_number,
+ UINT8);
+cmdline_parse_token_string_t cmd_avg_q_subport_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, subport_string,
+ "subport");
+cmdline_parse_token_num_t cmd_avg_q_subport_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, subport_number,
+ UINT32);
+cmdline_parse_token_string_t cmd_avg_q_pipe_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, pipe_string,
+ "pipe");
+cmdline_parse_token_num_t cmd_avg_q_pipe_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, pipe_number,
+ UINT32);
+cmdline_parse_token_string_t cmd_avg_q_tc_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, tc_string,
+ "tc");
+cmdline_parse_token_num_t cmd_avg_q_tc_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, tc_number,
+ UINT8);
+cmdline_parse_token_string_t cmd_avg_q_q_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, q_string,
+ "q");
+cmdline_parse_token_num_t cmd_avg_q_q_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, q_number,
+ UINT8);
+
+cmdline_parse_inst_t cmd_avg_q = {
+ .f = cmd_avg_q_parsed,
+ .data = NULL,
+ .help_str = "Show pipe stats.",
+ .tokens = {
+ (void *)&cmd_avg_q_qavg_string,
+ (void *)&cmd_avg_q_port_string,
+ (void *)&cmd_avg_q_port_number,
+ (void *)&cmd_avg_q_subport_string,
+ (void *)&cmd_avg_q_subport_number,
+ (void *)&cmd_avg_q_pipe_string,
+ (void *)&cmd_avg_q_pipe_number,
+ (void *)&cmd_avg_q_tc_string,
+ (void *)&cmd_avg_q_tc_number,
+ (void *)&cmd_avg_q_q_string,
+ (void *)&cmd_avg_q_q_number,
+ NULL,
+ },
+};
+
+/* *** SHOW AVERAGE QUEUE SIZE (tc/pipe) *** */
+struct cmd_avg_tcpipe_result {
+ cmdline_fixed_string_t qavg_string;
+ cmdline_fixed_string_t port_string;
+ uint8_t port_number;
+ cmdline_fixed_string_t subport_string;
+ uint32_t subport_number;
+ cmdline_fixed_string_t pipe_string;
+ uint32_t pipe_number;
+ cmdline_fixed_string_t tc_string;
+ uint8_t tc_number;
+};
+
+static void cmd_avg_tcpipe_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_avg_tcpipe_result *res = parsed_result;
+
+ if (qavg_tcpipe(res->port_number, res->subport_number, res->pipe_number, res->tc_number) < 0)
+ printf ("\nStats not available for these parameters. Check that both the port and subport are correct.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_avg_tcpipe_qavg_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcpipe_result, qavg_string,
+ "qavg");
+cmdline_parse_token_string_t cmd_avg_tcpipe_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcpipe_result, port_string,
+ "port");
+cmdline_parse_token_num_t cmd_avg_tcpipe_port_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_tcpipe_result, port_number,
+ UINT8);
+cmdline_parse_token_string_t cmd_avg_tcpipe_subport_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcpipe_result, subport_string,
+ "subport");
+cmdline_parse_token_num_t cmd_avg_tcpipe_subport_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_tcpipe_result, subport_number,
+ UINT32);
+cmdline_parse_token_string_t cmd_avg_tcpipe_pipe_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcpipe_result, pipe_string,
+ "pipe");
+cmdline_parse_token_num_t cmd_avg_tcpipe_pipe_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_tcpipe_result, pipe_number,
+ UINT32);
+cmdline_parse_token_string_t cmd_avg_tcpipe_tc_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcpipe_result, tc_string,
+ "tc");
+cmdline_parse_token_num_t cmd_avg_tcpipe_tc_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_tcpipe_result, tc_number,
+ UINT8);
+
+cmdline_parse_inst_t cmd_avg_tcpipe = {
+ .f = cmd_avg_tcpipe_parsed,
+ .data = NULL,
+ .help_str = "Show pipe stats.",
+ .tokens = {
+ (void *)&cmd_avg_tcpipe_qavg_string,
+ (void *)&cmd_avg_tcpipe_port_string,
+ (void *)&cmd_avg_tcpipe_port_number,
+ (void *)&cmd_avg_tcpipe_subport_string,
+ (void *)&cmd_avg_tcpipe_subport_number,
+ (void *)&cmd_avg_tcpipe_pipe_string,
+ (void *)&cmd_avg_tcpipe_pipe_number,
+ (void *)&cmd_avg_tcpipe_tc_string,
+ (void *)&cmd_avg_tcpipe_tc_number,
+ NULL,
+ },
+};
+
+/* *** SHOW AVERAGE QUEUE SIZE (pipe) *** */
+struct cmd_avg_pipe_result {
+ cmdline_fixed_string_t qavg_string;
+ cmdline_fixed_string_t port_string;
+ uint8_t port_number;
+ cmdline_fixed_string_t subport_string;
+ uint32_t subport_number;
+ cmdline_fixed_string_t pipe_string;
+ uint32_t pipe_number;
+};
+
+static void cmd_avg_pipe_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_avg_pipe_result *res = parsed_result;
+
+ if (qavg_pipe(res->port_number, res->subport_number, res->pipe_number) < 0)
+ printf ("\nStats not available for these parameters. Check that both the port and subport are correct.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_avg_pipe_qavg_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_pipe_result, qavg_string,
+ "qavg");
+cmdline_parse_token_string_t cmd_avg_pipe_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_pipe_result, port_string,
+ "port");
+cmdline_parse_token_num_t cmd_avg_pipe_port_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_pipe_result, port_number,
+ UINT8);
+cmdline_parse_token_string_t cmd_avg_pipe_subport_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_pipe_result, subport_string,
+ "subport");
+cmdline_parse_token_num_t cmd_avg_pipe_subport_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_pipe_result, subport_number,
+ UINT32);
+cmdline_parse_token_string_t cmd_avg_pipe_pipe_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_pipe_result, pipe_string,
+ "pipe");
+cmdline_parse_token_num_t cmd_avg_pipe_pipe_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_pipe_result, pipe_number,
+ UINT32);
+
+cmdline_parse_inst_t cmd_avg_pipe = {
+ .f = cmd_avg_pipe_parsed,
+ .data = NULL,
+ .help_str = "Show pipe stats.",
+ .tokens = {
+ (void *)&cmd_avg_pipe_qavg_string,
+ (void *)&cmd_avg_pipe_port_string,
+ (void *)&cmd_avg_pipe_port_number,
+ (void *)&cmd_avg_pipe_subport_string,
+ (void *)&cmd_avg_pipe_subport_number,
+ (void *)&cmd_avg_pipe_pipe_string,
+ (void *)&cmd_avg_pipe_pipe_number,
+ NULL,
+ },
+};
+
+/* *** SHOW AVERAGE QUEUE SIZE (tc/subport) *** */
+struct cmd_avg_tcsubport_result {
+ cmdline_fixed_string_t qavg_string;
+ cmdline_fixed_string_t port_string;
+ uint8_t port_number;
+ cmdline_fixed_string_t subport_string;
+ uint32_t subport_number;
+ cmdline_fixed_string_t tc_string;
+ uint8_t tc_number;
+};
+
+static void cmd_avg_tcsubport_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_avg_tcsubport_result *res = parsed_result;
+
+ if (qavg_tcsubport(res->port_number, res->subport_number, res->tc_number) < 0)
+ printf ("\nStats not available for these parameters. Check that both the port and subport are correct.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_avg_tcsubport_qavg_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcsubport_result, qavg_string,
+ "qavg");
+cmdline_parse_token_string_t cmd_avg_tcsubport_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcsubport_result, port_string,
+ "port");
+cmdline_parse_token_num_t cmd_avg_tcsubport_port_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_tcsubport_result, port_number,
+ UINT8);
+cmdline_parse_token_string_t cmd_avg_tcsubport_subport_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcsubport_result, subport_string,
+ "subport");
+cmdline_parse_token_num_t cmd_avg_tcsubport_subport_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_tcsubport_result, subport_number,
+ UINT32);
+cmdline_parse_token_string_t cmd_avg_tcsubport_tc_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_tcsubport_result, tc_string,
+ "tc");
+cmdline_parse_token_num_t cmd_avg_tcsubport_tc_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_tcsubport_result, tc_number,
+ UINT8);
+
+cmdline_parse_inst_t cmd_avg_tcsubport = {
+ .f = cmd_avg_tcsubport_parsed,
+ .data = NULL,
+ .help_str = "Show pipe stats.",
+ .tokens = {
+ (void *)&cmd_avg_tcsubport_qavg_string,
+ (void *)&cmd_avg_tcsubport_port_string,
+ (void *)&cmd_avg_tcsubport_port_number,
+ (void *)&cmd_avg_tcsubport_subport_string,
+ (void *)&cmd_avg_tcsubport_subport_number,
+ (void *)&cmd_avg_tcsubport_tc_string,
+ (void *)&cmd_avg_tcsubport_tc_number,
+ NULL,
+ },
+};
+
+/* *** SHOW AVERAGE QUEUE SIZE (subport) *** */
+struct cmd_avg_subport_result {
+ cmdline_fixed_string_t qavg_string;
+ cmdline_fixed_string_t port_string;
+ uint8_t port_number;
+ cmdline_fixed_string_t subport_string;
+ uint32_t subport_number;
+};
+
+static void cmd_avg_subport_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_avg_subport_result *res = parsed_result;
+
+ if (qavg_subport(res->port_number, res->subport_number) < 0)
+ printf("\nStats not available for these parameters. Check that both the port and subport are correct.\n\n");
+}
+
+cmdline_parse_token_string_t cmd_avg_subport_qavg_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_subport_result, qavg_string,
+ "qavg");
+cmdline_parse_token_string_t cmd_avg_subport_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_subport_result, port_string,
+ "port");
+cmdline_parse_token_num_t cmd_avg_subport_port_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_subport_result, port_number,
+ UINT8);
+cmdline_parse_token_string_t cmd_avg_subport_subport_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_avg_subport_result, subport_string,
+ "subport");
+cmdline_parse_token_num_t cmd_avg_subport_subport_number =
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_subport_result, subport_number,
+ UINT32);
+
+cmdline_parse_inst_t cmd_avg_subport = {
+ .f = cmd_avg_subport_parsed,
+ .data = NULL,
+ .help_str = "Show average queue size per subport.",
+ .tokens = {
+ (void *)&cmd_avg_subport_qavg_string,
+ (void *)&cmd_avg_subport_port_string,
+ (void *)&cmd_avg_subport_port_number,
+ (void *)&cmd_avg_subport_subport_string,
+ (void *)&cmd_avg_subport_subport_number,
+ NULL,
+ },
+};
+
+/* ******************************************************************************** */
+
+/* list of instructions */
+cmdline_parse_ctx_t main_ctx[] = {
+ (cmdline_parse_inst_t *)&cmd_help,
+ (cmdline_parse_inst_t *)&cmd_setqavg,
+ (cmdline_parse_inst_t *)&cmd_appstats,
+ (cmdline_parse_inst_t *)&cmd_subportstats,
+ (cmdline_parse_inst_t *)&cmd_pipestats,
+ (cmdline_parse_inst_t *)&cmd_avg_q,
+ (cmdline_parse_inst_t *)&cmd_avg_tcpipe,
+ (cmdline_parse_inst_t *)&cmd_avg_pipe,
+ (cmdline_parse_inst_t *)&cmd_avg_tcsubport,
+ (cmdline_parse_inst_t *)&cmd_avg_subport,
+ (cmdline_parse_inst_t *)&cmd_quit,
+ NULL,
+};
+
+/* prompt function, called from main on MASTER lcore */
+void
+prompt(void)
+{
+ struct cmdline *cl;
+
+ cl = cmdline_stdin_new(main_ctx, "qos_sched> ");
+ if (cl == NULL) {
+ return;
+ }
+ cmdline_interact(cl);
+ cmdline_stdin_exit(cl);
+}
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
new file mode 100644
index 00000000..70e12bb4
--- /dev/null
+++ b/examples/qos_sched/init.c
@@ -0,0 +1,370 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_sched.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+#include <rte_cfgfile.h>
+
+#include "main.h"
+#include "cfg_file.h"
+
+uint32_t app_numa_mask = 0;
+static uint32_t app_inited_port_mask = 0;
+
+int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];
+
+#define MAX_NAME_LEN 32
+
+struct ring_conf ring_conf = {
+ .rx_size = APP_RX_DESC_DEFAULT,
+ .ring_size = APP_RING_SIZE,
+ .tx_size = APP_TX_DESC_DEFAULT,
+};
+
+struct burst_conf burst_conf = {
+ .rx_burst = MAX_PKT_RX_BURST,
+ .ring_burst = PKT_ENQUEUE,
+ .qos_dequeue = PKT_DEQUEUE,
+ .tx_burst = MAX_PKT_TX_BURST,
+};
+
+struct ring_thresh rx_thresh = {
+ .pthresh = RX_PTHRESH,
+ .hthresh = RX_HTHRESH,
+ .wthresh = RX_WTHRESH,
+};
+
+struct ring_thresh tx_thresh = {
+ .pthresh = TX_PTHRESH,
+ .hthresh = TX_HTHRESH,
+ .wthresh = TX_WTHRESH,
+};
+
+uint32_t nb_pfc;
+const char *cfg_profile = NULL;
+int mp_size = NB_MBUF;
+struct flow_conf qos_conf[MAX_DATA_STREAMS];
+
+static const struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .max_rx_pkt_len = ETHER_MAX_LEN,
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled */
+ .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+ .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+ .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
+ .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ },
+ .txmode = {
+ .mq_mode = ETH_DCB_NONE,
+ },
+};
+
+static int
+app_init_port(uint8_t portid, struct rte_mempool *mp)
+{
+ int ret;
+ struct rte_eth_link link;
+ struct rte_eth_rxconf rx_conf;
+ struct rte_eth_txconf tx_conf;
+
+ /* check if port already initialized (multistream configuration) */
+ if (app_inited_port_mask & (1u << portid))
+ return 0;
+
+ rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
+ rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
+ rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
+ rx_conf.rx_free_thresh = 32;
+ rx_conf.rx_drop_en = 0;
+
+ tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
+ tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
+ tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
+ tx_conf.tx_free_thresh = 0;
+ tx_conf.tx_rs_thresh = 0;
+ tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
+
+ /* init port */
+ RTE_LOG(INFO, APP, "Initializing port %"PRIu8"... ", portid);
+ fflush(stdout);
+ ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure device: "
+ "err=%d, port=%"PRIu8"\n", ret, portid);
+
+ /* init one RX queue */
+ fflush(stdout);
+ ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
+ rte_eth_dev_socket_id(portid), &rx_conf, mp);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
+ "err=%d, port=%"PRIu8"\n", ret, portid);
+
+ /* init one TX queue */
+ fflush(stdout);
+ ret = rte_eth_tx_queue_setup(portid, 0,
+ (uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
+ "port=%"PRIu8" queue=%d\n", ret, portid, 0);
+
+ /* Start device */
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_pmd_port_start: "
+ "err=%d, port=%"PRIu8"\n", ret, portid);
+
+ printf("done: ");
+
+ /* get link status */
+ rte_eth_link_get(portid, &link);
+ if (link.link_status) {
+ printf(" Link Up - speed %u Mbps - %s\n",
+ (uint32_t) link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ } else {
+ printf(" Link Down\n");
+ }
+ rte_eth_promiscuous_enable(portid);
+
+ /* mark port as initialized */
+ app_inited_port_mask |= 1u << portid;
+
+ return 0;
+}
+
+static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
+ {
+ .tb_rate = 1250000000,
+ .tb_size = 1000000,
+
+ .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+ .tc_period = 10,
+ },
+};
+
+static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = {
+ { /* Profile #0 */
+ .tb_rate = 305175,
+ .tb_size = 1000000,
+
+ .tc_rate = {305175, 305175, 305175, 305175},
+ .tc_period = 40,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ .tc_ov_weight = 1,
+#endif
+
+ .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ },
+};
+
+struct rte_sched_port_params port_params = {
+ .name = "port_scheduler_0",
+ .socket = 0, /* computed */
+ .rate = 0, /* computed */
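+ /* max frame: dst MAC (6) + src MAC (6) + two VLAN tags (4 + 4) + EtherType (2) + 1500 byte payload */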
+ .mtu = 6 + 6 + 4 + 4 + 2 + 1500,
+ .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
+ .n_subports_per_port = 1,
+ .n_pipes_per_subport = 4096,
+ .qsize = {64, 64, 64, 64},
+ .pipe_profiles = pipe_profiles,
+ .n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),
+
+#ifdef RTE_SCHED_RED
+ .red_params = {
+ /* Traffic Class 0 Colors Green / Yellow / Red */
+ [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 1 - Colors Green / Yellow / Red */
+ [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 2 - Colors Green / Yellow / Red */
+ [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 3 - Colors Green / Yellow / Red */
+ [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
+ }
+#endif /* RTE_SCHED_RED */
+};
+
+static struct rte_sched_port *
+app_init_sched_port(uint32_t portid, uint32_t socketid)
+{
+ static char port_name[32]; /* static as referenced from global port_params */
+ struct rte_eth_link link;
+ struct rte_sched_port *port = NULL;
+ uint32_t pipe, subport;
+ int err;
+
+ rte_eth_link_get((uint8_t)portid, &link);
+
+ port_params.socket = socketid;
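+ /* link_speed is reported in Mbps; convert to bytes per second for the scheduler rate */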
+ port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
+ snprintf(port_name, sizeof(port_name), "port_%d", portid);
+ port_params.name = port_name;
+
+ port = rte_sched_port_config(&port_params);
+ if (port == NULL){
+ rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
+ }
+
+ for (subport = 0; subport < port_params.n_subports_per_port; subport ++) {
+ err = rte_sched_subport_config(port, subport, &subport_params[subport]);
+ if (err) {
+ rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n",
+ subport, err);
+ }
+
+ for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe ++) {
+ if (app_pipe_to_profile[subport][pipe] != -1) {
+ err = rte_sched_pipe_config(port, subport, pipe,
+ app_pipe_to_profile[subport][pipe]);
+ if (err) {
+ rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
+ "for profile %d, err=%d\n", pipe,
+ app_pipe_to_profile[subport][pipe], err);
+ }
+ }
+ }
+ }
+
+ return port;
+}
+
+static int
+app_load_cfg_profile(const char *profile)
+{
+ if (profile == NULL)
+ return 0;
+ struct rte_cfgfile *file = rte_cfgfile_load(profile, 0);
+ if (file == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);
+
+ cfg_load_port(file, &port_params);
+ cfg_load_subport(file, subport_params);
+ cfg_load_pipe(file, pipe_profiles);
+
+ rte_cfgfile_close(file);
+
+ return 0;
+}
+
+int app_init(void)
+{
+ uint32_t i;
+ char ring_name[MAX_NAME_LEN];
+ char pool_name[MAX_NAME_LEN];
+
+ if (rte_eth_dev_count() == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");
+
+ /* load configuration profile */
+ if (app_load_cfg_profile(cfg_profile) != 0)
+ rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");
+
+ /* Initialize each active flow */
+ for(i = 0; i < nb_pfc; i++) {
+ uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
+ struct rte_ring *ring;
+
+ snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
+ ring = rte_ring_lookup(ring_name);
+ if (ring == NULL)
+ qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
+ socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
+ else
+ qos_conf[i].rx_ring = ring;
+
+ snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
+ ring = rte_ring_lookup(ring_name);
+ if (ring == NULL)
+ qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
+ socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
+ else
+ qos_conf[i].tx_ring = ring;
+
+
+ /* create the mbuf pools for each RX Port */
+ snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
+ qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name,
+ mp_size, burst_conf.rx_burst * 4, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_eth_dev_socket_id(qos_conf[i].rx_port));
+ if (qos_conf[i].mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i);
+
+ app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
+ app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
+
+ qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
+ }
+
+ RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
+ rte_get_timer_hz());
+
+ RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u,"
+ "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
+ ring_conf.tx_size);
+
+ RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
+ " Worker read/QoS enqueue = %hu,\n"
+ " QoS dequeue = %hu, Worker write = %hu\n",
+ burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
+ burst_conf.qos_dequeue, burst_conf.tx_burst);
+
+ RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
+ "TX (p = %hhu, h = %hhu, w = %hhu)\n",
+ rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
+ tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
+
+ return 0;
+}
diff --git a/examples/qos_sched/main.c b/examples/qos_sched/main.c
new file mode 100644
index 00000000..e16b164d
--- /dev/null
+++ b/examples/qos_sched/main.c
@@ -0,0 +1,254 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+
+#include <rte_sched.h>
+
+#include "main.h"
+
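+/* Role flags assigned to an lcore; app_main_loop() combines them into a mode bitmask */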
+#define APP_MODE_NONE 0
+#define APP_RX_MODE 1
+#define APP_WT_MODE 2
+#define APP_TX_MODE 4
+
+uint8_t interactive = APP_INTERACTIVE_DEFAULT;
+uint32_t qavg_period = APP_QAVG_PERIOD;
+uint32_t qavg_ntimes = APP_QAVG_NTIMES;
+
+/* main processing loop */
+static int
+app_main_loop(__attribute__((unused))void *dummy)
+{
+ uint32_t lcore_id;
+ uint32_t i, mode;
+ uint32_t rx_idx = 0;
+ uint32_t wt_idx = 0;
+ uint32_t tx_idx = 0;
+ struct thread_conf *rx_confs[MAX_DATA_STREAMS];
+ struct thread_conf *wt_confs[MAX_DATA_STREAMS];
+ struct thread_conf *tx_confs[MAX_DATA_STREAMS];
+
+ memset(rx_confs, 0, sizeof(rx_confs));
+ memset(wt_confs, 0, sizeof(wt_confs));
+ memset(tx_confs, 0, sizeof(tx_confs));
+
+
+ mode = APP_MODE_NONE;
+ lcore_id = rte_lcore_id();
+
+ for (i = 0; i < nb_pfc; i++) {
+ struct flow_conf *flow = &qos_conf[i];
+
+ if (flow->rx_core == lcore_id) {
+ flow->rx_thread.rx_port = flow->rx_port;
+ flow->rx_thread.rx_ring = flow->rx_ring;
+ flow->rx_thread.rx_queue = flow->rx_queue;
+
+ rx_confs[rx_idx++] = &flow->rx_thread;
+
+ mode |= APP_RX_MODE;
+ }
+ if (flow->tx_core == lcore_id) {
+ flow->tx_thread.tx_port = flow->tx_port;
+ flow->tx_thread.tx_ring = flow->tx_ring;
+ flow->tx_thread.tx_queue = flow->tx_queue;
+
+ tx_confs[tx_idx++] = &flow->tx_thread;
+
+ mode |= APP_TX_MODE;
+ }
+ if (flow->wt_core == lcore_id) {
+ flow->wt_thread.rx_ring = flow->rx_ring;
+ flow->wt_thread.tx_ring = flow->tx_ring;
+ flow->wt_thread.tx_port = flow->tx_port;
+ flow->wt_thread.sched_port = flow->sched_port;
+
+ wt_confs[wt_idx++] = &flow->wt_thread;
+
+ mode |= APP_WT_MODE;
+ }
+ }
+
+ if (mode == APP_MODE_NONE) {
+ RTE_LOG(INFO, APP, "lcore %u has nothing to do\n", lcore_id);
+ return -1;
+ }
+
+ if (mode == (APP_RX_MODE | APP_WT_MODE)) {
+ RTE_LOG(INFO, APP, "lcore %u was configured for both RX and WT !!!\n",
+ lcore_id);
+ return -1;
+ }
+
+ RTE_LOG(INFO, APP, "entering main loop on lcore %u\n", lcore_id);
+ /* initialize mbuf memory */
+ if (mode == APP_RX_MODE) {
+ for (i = 0; i < rx_idx; i++) {
+ RTE_LOG(INFO, APP, "flow %u lcoreid %u "
+ "reading port %"PRIu8"\n",
+ i, lcore_id, rx_confs[i]->rx_port);
+ }
+
+ app_rx_thread(rx_confs);
+ }
+ else if (mode == (APP_TX_MODE | APP_WT_MODE)) {
+ for (i = 0; i < wt_idx; i++) {
+ wt_confs[i]->m_table = rte_malloc("table_wt", sizeof(struct rte_mbuf *)
+ * burst_conf.tx_burst, RTE_CACHE_LINE_SIZE);
+
+ if (wt_confs[i]->m_table == NULL)
+ rte_panic("flow %u unable to allocate memory buffer\n", i);
+
+ RTE_LOG(INFO, APP, "flow %u lcoreid %u sched+write "
+ "port %"PRIu8"\n",
+ i, lcore_id, wt_confs[i]->tx_port);
+ }
+
+ app_mixed_thread(wt_confs);
+ }
+ else if (mode == APP_TX_MODE) {
+ for (i = 0; i < tx_idx; i++) {
+ tx_confs[i]->m_table = rte_malloc("table_tx", sizeof(struct rte_mbuf *)
+ * burst_conf.tx_burst, RTE_CACHE_LINE_SIZE);
+
+ if (tx_confs[i]->m_table == NULL)
+ rte_panic("flow %u unable to allocate memory buffer\n", i);
+
+ RTE_LOG(INFO, APP, "flow %u lcoreid %u "
+ "writing port %"PRIu8"\n",
+ i, lcore_id, tx_confs[i]->tx_port);
+ }
+
+ app_tx_thread(tx_confs);
+ }
+ else if (mode == APP_WT_MODE){
+ for (i = 0; i < wt_idx; i++) {
+ RTE_LOG(INFO, APP, "flow %u lcoreid %u scheduling \n", i, lcore_id);
+ }
+
+ app_worker_thread(wt_confs);
+ }
+
+ return 0;
+}
+
+void
+app_stat(void)
+{
+ uint32_t i;
+ struct rte_eth_stats stats;
+ static struct rte_eth_stats rx_stats[MAX_DATA_STREAMS];
+ static struct rte_eth_stats tx_stats[MAX_DATA_STREAMS];
+
+ /* print statistics */
+ for(i = 0; i < nb_pfc; i++) {
+ struct flow_conf *flow = &qos_conf[i];
+
+ rte_eth_stats_get(flow->rx_port, &stats);
+ printf("\nRX port %"PRIu8": rx: %"PRIu64 " err: %"PRIu64
+ " no_mbuf: %"PRIu64 "\n",
+ flow->rx_port,
+ stats.ipackets - rx_stats[i].ipackets,
+ stats.ierrors - rx_stats[i].ierrors,
+ stats.rx_nombuf - rx_stats[i].rx_nombuf);
+ memcpy(&rx_stats[i], &stats, sizeof(stats));
+
+ rte_eth_stats_get(flow->tx_port, &stats);
+ printf("TX port %"PRIu8": tx: %" PRIu64 " err: %" PRIu64 "\n",
+ flow->tx_port,
+ stats.opackets - tx_stats[i].opackets,
+ stats.oerrors - tx_stats[i].oerrors);
+ memcpy(&tx_stats[i], &stats, sizeof(stats));
+
+ //printf("MP = %d\n", rte_mempool_count(conf->app_pktmbuf_pool));
+
+#if APP_COLLECT_STAT
+ printf("-------+------------+------------+\n");
+ printf(" | received | dropped |\n");
+ printf("-------+------------+------------+\n");
+ printf(" RX | %10" PRIu64 " | %10" PRIu64 " |\n",
+ flow->rx_thread.stat.nb_rx,
+ flow->rx_thread.stat.nb_drop);
+ printf("QOS+TX | %10" PRIu64 " | %10" PRIu64 " | pps: %"PRIu64 " \n",
+ flow->wt_thread.stat.nb_rx,
+ flow->wt_thread.stat.nb_drop,
+ flow->wt_thread.stat.nb_rx - flow->wt_thread.stat.nb_drop);
+ printf("-------+------------+------------+\n");
+
+ memset(&flow->rx_thread.stat, 0, sizeof(struct thread_stat));
+ memset(&flow->wt_thread.stat, 0, sizeof(struct thread_stat));
+#endif
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int ret;
+
+ ret = app_parse_args(argc, argv);
+ if (ret < 0)
+ return -1;
+
+ ret = app_init();
+ if (ret < 0)
+ return -1;
+
+ /* launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(app_main_loop, NULL, SKIP_MASTER);
+
+ if (interactive) {
+ sleep(1);
+ prompt();
+ }
+ else {
+ /* print statistics every second */
+ while(1) {
+ sleep(1);
+ app_stat();
+ }
+ }
+
+ return 0;
+}
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
new file mode 100644
index 00000000..82aa0fae
--- /dev/null
+++ b/examples/qos_sched/main.h
@@ -0,0 +1,195 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_sched.h>
+
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define APP_INTERACTIVE_DEFAULT 0
+
+#define APP_RX_DESC_DEFAULT 128
+#define APP_TX_DESC_DEFAULT 256
+
+#define APP_RING_SIZE (8*1024)
+#define NB_MBUF (2*1024*1024)
+
+#define MAX_PKT_RX_BURST 64
+#define PKT_ENQUEUE 64
+#define PKT_DEQUEUE 32
+#define MAX_PKT_TX_BURST 64
+
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
+
+#define BURST_TX_DRAIN_US 100
+
+#define MAX_DATA_STREAMS (RTE_MAX_LCORE/2)
+#define MAX_SCHED_SUBPORTS 8
+#define MAX_SCHED_PIPES 4096
+
+#ifndef APP_COLLECT_STAT
+#define APP_COLLECT_STAT 1
+#endif
+
+#if APP_COLLECT_STAT
+#define APP_STATS_ADD(stat,val) (stat) += (val)
+#else
+#define APP_STATS_ADD(stat,val) do {(void) (val);} while (0)
+#endif
+
+#define APP_QAVG_NTIMES 10
+#define APP_QAVG_PERIOD 100
+
+struct thread_stat
+{
+ uint64_t nb_rx;
+ uint64_t nb_drop;
+};
+
+
+struct thread_conf
+{
+ uint32_t counter;
+ uint32_t n_mbufs;
+ struct rte_mbuf **m_table;
+
+ uint8_t rx_port;
+ uint8_t tx_port;
+ uint16_t rx_queue;
+ uint16_t tx_queue;
+ struct rte_ring *rx_ring;
+ struct rte_ring *tx_ring;
+ struct rte_sched_port *sched_port;
+
+#if APP_COLLECT_STAT
+ struct thread_stat stat;
+#endif
+} __rte_cache_aligned;
+
+
+struct flow_conf
+{
+ uint32_t rx_core;
+ uint32_t wt_core;
+ uint32_t tx_core;
+ uint8_t rx_port;
+ uint8_t tx_port;
+ uint16_t rx_queue;
+ uint16_t tx_queue;
+ struct rte_ring *rx_ring;
+ struct rte_ring *tx_ring;
+ struct rte_sched_port *sched_port;
+ struct rte_mempool *mbuf_pool;
+
+ struct thread_conf rx_thread;
+ struct thread_conf wt_thread;
+ struct thread_conf tx_thread;
+};
+
+
+struct ring_conf
+{
+ uint32_t rx_size;
+ uint32_t ring_size;
+ uint32_t tx_size;
+};
+
+struct burst_conf
+{
+ uint16_t rx_burst;
+ uint16_t ring_burst;
+ uint16_t qos_dequeue;
+ uint16_t tx_burst;
+};
+
+struct ring_thresh
+{
+ uint8_t pthresh; /**< Ring prefetch threshold. */
+ uint8_t hthresh; /**< Ring host threshold. */
+ uint8_t wthresh; /**< Ring writeback threshold. */
+};
+
+extern uint8_t interactive;
+extern uint32_t qavg_period;
+extern uint32_t qavg_ntimes;
+extern uint32_t nb_pfc;
+extern const char *cfg_profile;
+extern int mp_size;
+extern struct flow_conf qos_conf[];
+extern int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];
+
+extern struct ring_conf ring_conf;
+extern struct burst_conf burst_conf;
+extern struct ring_thresh rx_thresh;
+extern struct ring_thresh tx_thresh;
+
+extern struct rte_sched_port_params port_params;
+
+int app_parse_args(int argc, char **argv);
+int app_init(void);
+
+void prompt(void);
+void app_rx_thread(struct thread_conf **qconf);
+void app_tx_thread(struct thread_conf **qconf);
+void app_worker_thread(struct thread_conf **qconf);
+void app_mixed_thread(struct thread_conf **qconf);
+
+void app_stat(void);
+int subport_stat(uint8_t port_id, uint32_t subport_id);
+int pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q);
+int qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc);
+int qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc);
+int qavg_subport(uint8_t port_id, uint32_t subport_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MAIN_H_ */
diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg
new file mode 100644
index 00000000..f5b704cc
--- /dev/null
+++ b/examples/qos_sched/profile.cfg
@@ -0,0 +1,104 @@
+; BSD LICENSE
+;
+; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; This file enables the following hierarchical scheduler configuration for each
+; 10GbE output port:
+; * Single subport (subport 0):
+; - Subport rate set to 100% of port rate
+; - Each of the 4 traffic classes has rate set to 100% of port rate
+; * 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
+; - Pipe rate set to 1/4K of port rate
+; - Each of the 4 traffic classes has rate set to 100% of pipe rate
+; - Within each traffic class, the byte-level WRR weights for the 4 queues
+; are set to 1:1:1:1
+;
+; For more details, please refer to chapter "Quality of Service (QoS) Framework"
+; of Data Plane Development Kit (DPDK) Programmer's Guide.
+
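+; As a worked example of the numbers below: the subport token bucket rate is
+; 1250000000 bytes/s (10 Gbps), so splitting it evenly across 4096 pipes gives
+; 1250000000 / 4096 = ~305175 bytes/s, which is the "tb rate" and per traffic
+; class rate used in [pipe profile 0].
+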
+; Port configuration
+[port]
+frame overhead = 24
+number of subports per port = 1
+number of pipes per subport = 4096
+queue sizes = 64 64 64 64
+
+; Subport configuration
+[subport 0]
+tb rate = 1250000000 ; Bytes per second
+tb size = 1000000 ; Bytes
+
+tc 0 rate = 1250000000 ; Bytes per second
+tc 1 rate = 1250000000 ; Bytes per second
+tc 2 rate = 1250000000 ; Bytes per second
+tc 3 rate = 1250000000 ; Bytes per second
+tc period = 10 ; Milliseconds
+
+pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0
+
+; Pipe configuration
+[pipe profile 0]
+tb rate = 305175 ; Bytes per second
+tb size = 1000000 ; Bytes
+
+tc 0 rate = 305175 ; Bytes per second
+tc 1 rate = 305175 ; Bytes per second
+tc 2 rate = 305175 ; Bytes per second
+tc 3 rate = 305175 ; Bytes per second
+tc period = 40 ; Milliseconds
+
+tc 3 oversubscription weight = 1
+
+tc 0 wrr weights = 1 1 1 1
+tc 1 wrr weights = 1 1 1 1
+tc 2 wrr weights = 1 1 1 1
+tc 3 wrr weights = 1 1 1 1
+
+; RED params per traffic class and color (Green / Yellow / Red)
+[red]
+tc 0 wred min = 48 40 32
+tc 0 wred max = 64 64 64
+tc 0 wred inv prob = 10 10 10
+tc 0 wred weight = 9 9 9
+
+tc 1 wred min = 48 40 32
+tc 1 wred max = 64 64 64
+tc 1 wred inv prob = 10 10 10
+tc 1 wred weight = 9 9 9
+
+tc 2 wred min = 48 40 32
+tc 2 wred max = 64 64 64
+tc 2 wred inv prob = 10 10 10
+tc 2 wred weight = 9 9 9
+
+tc 3 wred min = 48 40 32
+tc 3 wred max = 64 64 64
+tc 3 wred inv prob = 10 10 10
+tc 3 wred weight = 9 9 9
diff --git a/examples/qos_sched/profile_ov.cfg b/examples/qos_sched/profile_ov.cfg
new file mode 100644
index 00000000..33000df9
--- /dev/null
+++ b/examples/qos_sched/profile_ov.cfg
@@ -0,0 +1,90 @@
+; BSD LICENSE
+;
+; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; Port configuration
+[port]
+frame overhead = 24
+number of subports per port = 1
+number of pipes per subport = 32
+queue sizes = 64 64 64 64
+
+; Subport configuration
+[subport 0]
+tb rate = 8400000 ; Bytes per second
+tb size = 100000 ; Bytes
+
+tc 0 rate = 8400000 ; Bytes per second
+tc 1 rate = 8400000 ; Bytes per second
+tc 2 rate = 8400000 ; Bytes per second
+tc 3 rate = 8400000 ; Bytes per second
+tc period = 10 ; Milliseconds
+
+pipe 0-31 = 0 ; These pipes are configured with pipe profile 0
+
+; Pipe configuration
+[pipe profile 0]
+tb rate = 16800000 ; Bytes per second
+tb size = 1000000 ; Bytes
+
+tc 0 rate = 16800000 ; Bytes per second
+tc 1 rate = 16800000 ; Bytes per second
+tc 2 rate = 16800000 ; Bytes per second
+tc 3 rate = 16800000 ; Bytes per second
+tc period = 28 ; Milliseconds
+
+tc 3 oversubscription weight = 1
+
+tc 0 wrr weights = 1 1 1 1
+tc 1 wrr weights = 1 1 1 1
+tc 2 wrr weights = 1 1 1 1
+tc 3 wrr weights = 1 1 1 1
+
+; RED params per traffic class and color (Green / Yellow / Red)
+[red]
+tc 0 wred min = 48 40 32
+tc 0 wred max = 64 64 64
+tc 0 wred inv prob = 10 10 10
+tc 0 wred weight = 9 9 9
+
+tc 1 wred min = 48 40 32
+tc 1 wred max = 64 64 64
+tc 1 wred inv prob = 10 10 10
+tc 1 wred weight = 9 9 9
+
+tc 2 wred min = 48 40 32
+tc 2 wred max = 64 64 64
+tc 2 wred inv prob = 10 10 10
+tc 2 wred weight = 9 9 9
+
+tc 3 wred min = 48 40 32
+tc 3 wred max = 64 64 64
+tc 3 wred inv prob = 10 10 10
+tc 3 wred weight = 9 9 9
diff --git a/examples/qos_sched/stats.c b/examples/qos_sched/stats.c
new file mode 100644
index 00000000..5c894455
--- /dev/null
+++ b/examples/qos_sched/stats.c
@@ -0,0 +1,315 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <string.h>
+
+#include "main.h"
+
+int
+qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q)
+{
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t queue_id, count, i;
+ uint32_t average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport
+ || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE || q >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+
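+ /*
+ * Queues are indexed contiguously: each pipe owns
+ * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
+ * queues, pipes are ordered by (subport, pipe), and (tc, q) selects the
+ * offset within the pipe.
+ */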
+ queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+ queue_id = queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + q);
+
+ average = 0;
+
+ for (count = 0; count < qavg_ntimes; count++) {
+ rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+ average += qlen;
+ usleep(qavg_period);
+ }
+
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
+}
+
+int
+qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc)
+{
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t queue_id, count, i;
+ uint32_t average, part_average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport
+ || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+
+ queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+
+ average = 0;
+
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + i), &stats, &qlen);
+ part_average += qlen;
+ }
+ average += part_average / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+ usleep(qavg_period);
+ }
+
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
+}
+
+int
+qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+{
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t queue_id, count, i;
+ uint32_t average, part_average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+
+ queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+
+ average = 0;
+
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
+ part_average += qlen;
+ }
+ average += part_average / (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+ usleep(qavg_period);
+ }
+
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
+}
+
+int
+qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc)
+{
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t queue_id, count, i, j;
+ uint32_t average, part_average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+
+ average = 0;
+
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
+ for (i = 0; i < port_params.n_pipes_per_subport; i++) {
+ queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i);
+
+ for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
+ rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen);
+ part_average += qlen;
+ }
+ }
+
+ average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+ usleep(qavg_period);
+ }
+
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
+}
+
+int
+qavg_subport(uint8_t port_id, uint32_t subport_id)
+{
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t queue_id, count, i, j;
+ uint32_t average, part_average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+
+ average = 0;
+
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
+ for (i = 0; i < port_params.n_pipes_per_subport; i++) {
+ queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i);
+
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
+ rte_sched_queue_read_stats(port, queue_id + j, &stats, &qlen);
+ part_average += qlen;
+ }
+ }
+
+ average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+ usleep(qavg_period);
+ }
+
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
+}
+
+int
+subport_stat(uint8_t port_id, uint32_t subport_id)
+{
+ struct rte_sched_subport_stats stats;
+ struct rte_sched_port *port;
+ uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint8_t i;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+ memset (tc_ov, 0, sizeof(tc_ov));
+
+ rte_sched_subport_read_stats(port, subport_id, &stats, tc_ov);
+
+ printf("\n");
+ printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+ printf("| TC | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| OV Status |\n");
+ printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ printf("| %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " |\n", i,
+ stats.n_pkts_tc[i], stats.n_pkts_tc_dropped[i],
+ stats.n_bytes_tc[i], stats.n_bytes_tc_dropped[i], tc_ov[i]);
+ printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+ }
+ printf("\n");
+
+ return 0;
+}
+
+int
+pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+{
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint8_t i, j;
+ uint32_t queue_id;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+
+ queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+
+ printf("\n");
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+ printf("| TC | Queue | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| Length |\n");
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
+
+ rte_sched_queue_read_stats(port, queue_id + (i * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen);
+
+ printf("| %d | %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, j,
+ stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen);
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+ }
+ if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+ }
+ printf("\n");
+
+ return 0;
+}