author     Damjan Marion <damarion@cisco.com>       2020-01-30 20:47:37 +0100
committer  Damjan Marion <dmarion@me.com>           2020-01-30 21:38:14 +0000
commit     548d70de68a4156d5bd4148e50a81555a0ef169f (patch)
tree       42ff37482b17998f0d37601ba7cab464035ad9b8 /extras/deprecated/dpdk-hqos
parent     7db6ab03db874d030a2d56ff02036c0594b46ccc (diff)
misc: deprecate dpdk hqos
Not in functional state for a long time ...

Type: refactor
Change-Id: I2cc1525a6d49518cbc94faf6afbf0d2d0d515f56
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'extras/deprecated/dpdk-hqos')
-rw-r--r--   extras/deprecated/dpdk-hqos/api/dpdk.api      82
-rwxr-xr-x   extras/deprecated/dpdk-hqos/api/dpdk_api.c   248
-rw-r--r--   extras/deprecated/dpdk-hqos/api/dpdk_test.c  285
-rw-r--r--   extras/deprecated/dpdk-hqos/dpdk-hqos.diff  1726
-rw-r--r--   extras/deprecated/dpdk-hqos/hqos.c           771
-rw-r--r--   extras/deprecated/dpdk-hqos/qos_doc.md       411
6 files changed, 3523 insertions, 0 deletions
diff --git a/extras/deprecated/dpdk-hqos/api/dpdk.api b/extras/deprecated/dpdk-hqos/api/dpdk.api
new file mode 100644
index 00000000000..40bc7369af4
--- /dev/null
+++ b/extras/deprecated/dpdk-hqos/api/dpdk.api
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "1.0.0";
+import "vnet/interface_types.api";
+
+/** \brief DPDK interface HQoS pipe profile set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param pipe - pipe ID within its subport
+ @param profile - pipe profile ID
+*/
+autoreply define sw_interface_set_dpdk_hqos_pipe {
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ u32 subport;
+ u32 pipe;
+ u32 profile;
+ option vat_help = "rx sw_if_index <id> subport <subport-id> pipe <pipe-id> profile <profile-id>";
+};
+
+/** \brief DPDK interface HQoS subport parameters set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param tb_rate - subport token bucket rate (measured in bytes/second)
+ @param tb_size - subport token bucket size (measured in credits)
+ @param tc_rate - subport traffic class 0 .. 3 rates (measured in bytes/second)
+ @param tc_period - enforcement period for rates (measured in milliseconds)
+*/
+autoreply define sw_interface_set_dpdk_hqos_subport {
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ u32 subport;
+ u32 tb_rate;
+ u32 tb_size;
+ u32 tc_rate[4];
+ u32 tc_period;
+ option vat_help = "rx sw_if_index <id> subport <subport-id> [rate <n>] [bktsize <n>] [tc0 <n>] [tc1 <n>] [tc2 <n>] [tc3 <n>] [period <n>]\n";
+};
+
+/** \brief DPDK interface HQoS tctbl entry set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param entry - entry index ID
+ @param tc - traffic class (0 .. 3)
+ @param queue - traffic class queue (0 .. 3)
+*/
+autoreply define sw_interface_set_dpdk_hqos_tctbl {
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ u32 entry;
+ u32 tc;
+ u32 queue;
+ option vat_help = "rx sw_if_index <id> entry <n> tc <n> queue <n>";
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
+
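For a rough sense of how these messages were driven, the vat_help strings above correspond to vpp_api_test commands named after the messages; the interface index and other values below are purely illustrative:

  sw_interface_set_dpdk_hqos_pipe rx sw_if_index 1 subport 0 pipe 2 profile 1
  sw_interface_set_dpdk_hqos_subport rx sw_if_index 1 subport 0 rate 1250000000 bktsize 1000000 period 10
  sw_interface_set_dpdk_hqos_tctbl rx sw_if_index 1 entry 16 tc 2 queue 2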
diff --git a/extras/deprecated/dpdk-hqos/api/dpdk_api.c b/extras/deprecated/dpdk-hqos/api/dpdk_api.c
new file mode 100755
index 00000000000..8b22c650c82
--- /dev/null
+++ b/extras/deprecated/dpdk-hqos/api/dpdk_api.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vnet.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/bitmap.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <dpdk/device/dpdk.h>
+#include <vlib/pci/pci.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include <dpdk/device/dpdk_priv.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+/* define message IDs */
+#include <dpdk/api/dpdk.api_enum.h>
+#include <dpdk/api/dpdk.api_types.h>
+
+#include <vlibapi/api_helper_macros.h>
+
+static void
+ vl_api_sw_interface_set_dpdk_hqos_pipe_t_handler
+ (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp)
+{
+ vl_api_sw_interface_set_dpdk_hqos_pipe_reply_t *rmp;
+ int rv = 0;
+
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 subport = ntohl (mp->subport);
+ u32 pipe = ntohl (mp->pipe);
+ u32 profile = ntohl (mp->profile);
+ vnet_hw_interface_t *hw;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ /* hw_if & dpdk device */
+ hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index);
+
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rv = rte_sched_pipe_config (xd->hqos_ht->hqos, subport, pipe, profile);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_PIPE_REPLY);
+}
+
+static void *vl_api_sw_interface_set_dpdk_hqos_pipe_t_print
+ (vl_api_sw_interface_set_dpdk_hqos_pipe_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_pipe ");
+
+ s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index));
+
+ s = format (s, "subport %u pipe %u profile %u ",
+ ntohl (mp->subport), ntohl (mp->pipe), ntohl (mp->profile));
+
+ FINISH;
+}
+
+static void
+ vl_api_sw_interface_set_dpdk_hqos_subport_t_handler
+ (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp)
+{
+ vl_api_sw_interface_set_dpdk_hqos_subport_reply_t *rmp;
+ int rv = 0;
+
+ dpdk_main_t *dm = &dpdk_main;
+ dpdk_device_t *xd;
+ struct rte_sched_subport_params p;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 subport = ntohl (mp->subport);
+ p.tb_rate = ntohl (mp->tb_rate);
+ p.tb_size = ntohl (mp->tb_size);
+ p.tc_rate[0] = ntohl (mp->tc_rate[0]);
+ p.tc_rate[1] = ntohl (mp->tc_rate[1]);
+ p.tc_rate[2] = ntohl (mp->tc_rate[2]);
+ p.tc_rate[3] = ntohl (mp->tc_rate[3]);
+ p.tc_period = ntohl (mp->tc_period);
+
+ vnet_hw_interface_t *hw;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ /* hw_if & dpdk device */
+ hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index);
+
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport, &p);
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_SUBPORT_REPLY);
+}
+
+static void *vl_api_sw_interface_set_dpdk_hqos_subport_t_print
+ (vl_api_sw_interface_set_dpdk_hqos_subport_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_subport ");
+
+ s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index));
+
+ s =
+ format (s,
+ "subport %u rate %u bkt_size %u tc0 %u tc1 %u tc2 %u tc3 %u period %u",
+ ntohl (mp->subport), ntohl (mp->tb_rate), ntohl (mp->tb_size),
+ ntohl (mp->tc_rate[0]), ntohl (mp->tc_rate[1]),
+ ntohl (mp->tc_rate[2]), ntohl (mp->tc_rate[3]),
+ ntohl (mp->tc_period));
+
+ FINISH;
+}
+
+static void
+ vl_api_sw_interface_set_dpdk_hqos_tctbl_t_handler
+ (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp)
+{
+ vl_api_sw_interface_set_dpdk_hqos_tctbl_reply_t *rmp;
+ int rv = 0;
+
+ dpdk_main_t *dm = &dpdk_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_device_t *xd;
+
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ u32 entry = ntohl (mp->entry);
+ u32 tc = ntohl (mp->tc);
+ u32 queue = ntohl (mp->queue);
+ u32 val, i;
+
+ vnet_hw_interface_t *hw;
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ /* hw_if & dpdk device */
+ hw = vnet_get_sup_hw_interface (dm->vnet_main, sw_if_index);
+
+ xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+
+ if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ {
+ clib_warning ("invalid traffic class !!");
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto done;
+ }
+ if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ {
+ clib_warning ("invalid queue !!");
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto done;
+ }
+
+ /* Detect the set of worker threads */
+ uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+
+ if (p == 0)
+ {
+ clib_warning ("worker thread registration AWOL !!");
+ rv = VNET_API_ERROR_INVALID_VALUE_2;
+ goto done;
+ }
+
+ vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
+ int worker_thread_first = tr->first_index;
+ int worker_thread_count = tr->count;
+
+ val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
+ for (i = 0; i < worker_thread_count; i++)
+ xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val;
+
+ BAD_SW_IF_INDEX_LABEL;
+done:
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_DPDK_HQOS_TCTBL_REPLY);
+}
+
+static void *vl_api_sw_interface_set_dpdk_hqos_tctbl_t_print
+ (vl_api_sw_interface_set_dpdk_hqos_tctbl_t * mp, void *handle)
+{
+ u8 *s;
+
+ s = format (0, "SCRIPT: sw_interface_set_dpdk_hqos_tctbl ");
+
+ s = format (s, "sw_if_index %u ", ntohl (mp->sw_if_index));
+
+ s = format (s, "entry %u tc %u queue %u",
+ ntohl (mp->entry), ntohl (mp->tc), ntohl (mp->queue));
+
+ FINISH;
+}
+
+#include <dpdk/api/dpdk.api.c>
+static clib_error_t *
+dpdk_api_init (vlib_main_t * vm)
+{
+ dpdk_main_t *dm = &dpdk_main;
+
+ /* Ask for a correctly-sized block of API message decode slots */
+ dm->msg_id_base = setup_message_id_table ();
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (dpdk_api_init) =
+{
+ .runs_after = VLIB_INITS ("dpdk_init"),
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
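The tctbl handler above packs a traffic class and queue into a single translation-table entry as val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue. A minimal standalone sketch of that arithmetic, with QUEUES_PER_TC standing in for the DPDK constant (4 for the scheduler generation this code targeted) and illustrative values:

#include <stdio.h>

#define QUEUES_PER_TC 4   /* stand-in for RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS */

int
main (void)
{
  unsigned tc = 2, queue = 2;                       /* illustrative values */
  unsigned entry_val = tc * QUEUES_PER_TC + queue;  /* packed entry: 10 */

  /* Display code later unpacks entries with / and % in the same way. */
  printf ("entry value %u -> tc %u, queue %u\n",
          entry_val, entry_val / QUEUES_PER_TC, entry_val % QUEUES_PER_TC);
  return 0;
}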
diff --git a/extras/deprecated/dpdk-hqos/api/dpdk_test.c b/extras/deprecated/dpdk-hqos/api/dpdk_test.c
new file mode 100644
index 00000000000..951082faaf9
--- /dev/null
+++ b/extras/deprecated/dpdk-hqos/api/dpdk_test.c
@@ -0,0 +1,285 @@
+
+/*
+ * dpdk_test.c - skeleton vpp-api-test plug-in
+ *
+ * Copyright (c) <current-year> <your-organization>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/ip.h>
+
+uword unformat_sw_if_index (unformat_input_t * input, va_list * args);
+
+/* Declare message IDs */
+#include <dpdk/api/dpdk.api_enum.h>
+#include <dpdk/api/dpdk.api_types.h>
+
+typedef struct {
+ /* API message ID base */
+ u16 msg_id_base;
+ vat_main_t *vat_main;
+} dpdk_test_main_t;
+
+dpdk_test_main_t dpdk_test_main;
+
+/* M: construct, but don't yet send a message */
+#define M(T,t) \
+do { \
+ vam->result_ready = 0; \
+ mp = vl_msg_api_alloc(sizeof(*mp)); \
+ clib_memset (mp, 0, sizeof (*mp)); \
+ mp->_vl_msg_id = ntohs (VL_API_##T + dm->msg_id_base); \
+ mp->client_index = vam->my_client_index; \
+} while(0);
+
+#define M2(T,t,n) \
+do { \
+ vam->result_ready = 0; \
+ mp = vl_msg_api_alloc(sizeof(*mp)+(n)); \
+ clib_memset (mp, 0, sizeof (*mp)); \
+ mp->_vl_msg_id = ntohs (VL_API_##T + dm->msg_id_base); \
+ mp->client_index = vam->my_client_index; \
+} while(0);
+
+/* S: send a message */
+#define S (vl_msg_api_send_shmem (vam->vl_input_queue, (u8 *)&mp))
+
+/* W: wait for results, with timeout */
+#define W \
+do { \
+ timeout = vat_time_now (vam) + 1.0; \
+ \
+ while (vat_time_now (vam) < timeout) { \
+ if (vam->result_ready == 1) { \
+ return (vam->retval); \
+ } \
+ } \
+ return -99; \
+} while(0);
+
+static int
+api_sw_interface_set_dpdk_hqos_pipe (vat_main_t * vam)
+{
+ dpdk_test_main_t * dm = &dpdk_test_main;
+ unformat_input_t *i = vam->input;
+ vl_api_sw_interface_set_dpdk_hqos_pipe_t *mp;
+ f64 timeout;
+ u32 sw_if_index;
+ u8 sw_if_index_set = 0;
+ u32 subport;
+ u8 subport_set = 0;
+ u32 pipe;
+ u8 pipe_set = 0;
+ u32 profile;
+ u8 profile_set = 0;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "rx sw_if_index %u", &sw_if_index))
+ sw_if_index_set = 1;
+ else if (unformat (i, "subport %u", &subport))
+ subport_set = 1;
+ else if (unformat (i, "pipe %u", &pipe))
+ pipe_set = 1;
+ else if (unformat (i, "profile %u", &profile))
+ profile_set = 1;
+ else
+ break;
+ }
+
+ if (sw_if_index_set == 0)
+ {
+ errmsg ("missing interface name or sw_if_index");
+ return -99;
+ }
+
+ if (subport_set == 0)
+ {
+ errmsg ("missing subport ");
+ return -99;
+ }
+
+ if (pipe_set == 0)
+ {
+ errmsg ("missing pipe");
+ return -99;
+ }
+
+ if (profile_set == 0)
+ {
+ errmsg ("missing profile");
+ return -99;
+ }
+
+ M (SW_INTERFACE_SET_DPDK_HQOS_PIPE, sw_interface_set_dpdk_hqos_pipe);
+
+ mp->sw_if_index = ntohl (sw_if_index);
+ mp->subport = ntohl (subport);
+ mp->pipe = ntohl (pipe);
+ mp->profile = ntohl (profile);
+
+
+ S;
+ W;
+ /* NOTREACHED */
+ return 0;
+}
+
+static int
+api_sw_interface_set_dpdk_hqos_subport (vat_main_t * vam)
+{
+ dpdk_test_main_t * dm = &dpdk_test_main;
+ unformat_input_t *i = vam->input;
+ vl_api_sw_interface_set_dpdk_hqos_subport_t *mp;
+ f64 timeout;
+ u32 sw_if_index;
+ u8 sw_if_index_set = 0;
+ u32 subport;
+ u8 subport_set = 0;
+ u32 tb_rate = 1250000000; /* 10GbE */
+ u32 tb_size = 1000000;
+ u32 tc_rate[] = { 1250000000, 1250000000, 1250000000, 1250000000 };
+ u32 tc_period = 10;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "rx sw_if_index %u", &sw_if_index))
+ sw_if_index_set = 1;
+ else if (unformat (i, "subport %u", &subport))
+ subport_set = 1;
+ else if (unformat (i, "rate %u", &tb_rate))
+ {
+ u32 tc_id;
+
+ for (tc_id = 0; tc_id < (sizeof (tc_rate) / sizeof (tc_rate[0]));
+ tc_id++)
+ tc_rate[tc_id] = tb_rate;
+ }
+ else if (unformat (i, "bktsize %u", &tb_size))
+ ;
+ else if (unformat (i, "tc0 %u", &tc_rate[0]))
+ ;
+ else if (unformat (i, "tc1 %u", &tc_rate[1]))
+ ;
+ else if (unformat (i, "tc2 %u", &tc_rate[2]))
+ ;
+ else if (unformat (i, "tc3 %u", &tc_rate[3]))
+ ;
+ else if (unformat (i, "period %u", &tc_period))
+ ;
+ else
+ break;
+ }
+
+ if (sw_if_index_set == 0)
+ {
+ errmsg ("missing interface name or sw_if_index");
+ return -99;
+ }
+
+ if (subport_set == 0)
+ {
+ errmsg ("missing subport ");
+ return -99;
+ }
+
+ M (SW_INTERFACE_SET_DPDK_HQOS_SUBPORT, sw_interface_set_dpdk_hqos_subport);
+
+ mp->sw_if_index = ntohl (sw_if_index);
+ mp->subport = ntohl (subport);
+ mp->tb_rate = ntohl (tb_rate);
+ mp->tb_size = ntohl (tb_size);
+ mp->tc_rate[0] = ntohl (tc_rate[0]);
+ mp->tc_rate[1] = ntohl (tc_rate[1]);
+ mp->tc_rate[2] = ntohl (tc_rate[2]);
+ mp->tc_rate[3] = ntohl (tc_rate[3]);
+ mp->tc_period = ntohl (tc_period);
+
+ S;
+ W;
+ /* NOTREACHED */
+ return 0;
+}
+
+static int
+api_sw_interface_set_dpdk_hqos_tctbl (vat_main_t * vam)
+{
+ dpdk_test_main_t * dm = &dpdk_test_main;
+ unformat_input_t *i = vam->input;
+ vl_api_sw_interface_set_dpdk_hqos_tctbl_t *mp;
+ f64 timeout;
+ u32 sw_if_index;
+ u8 sw_if_index_set = 0;
+ u8 entry_set = 0;
+ u8 tc_set = 0;
+ u8 queue_set = 0;
+ u32 entry, tc, queue;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "rx sw_if_index %u", &sw_if_index))
+ sw_if_index_set = 1;
+ else if (unformat (i, "entry %d", &entry))
+ entry_set = 1;
+ else if (unformat (i, "tc %d", &tc))
+ tc_set = 1;
+ else if (unformat (i, "queue %d", &queue))
+ queue_set = 1;
+ else
+ break;
+ }
+
+ if (sw_if_index_set == 0)
+ {
+ errmsg ("missing interface name or sw_if_index");
+ return -99;
+ }
+
+ if (entry_set == 0)
+ {
+ errmsg ("missing entry ");
+ return -99;
+ }
+
+ if (tc_set == 0)
+ {
+ errmsg ("missing traffic class ");
+ return -99;
+ }
+
+ if (queue_set == 0)
+ {
+ errmsg ("missing queue ");
+ return -99;
+ }
+
+ M (SW_INTERFACE_SET_DPDK_HQOS_TCTBL, sw_interface_set_dpdk_hqos_tctbl);
+
+ mp->sw_if_index = ntohl (sw_if_index);
+ mp->entry = ntohl (entry);
+ mp->tc = ntohl (tc);
+ mp->queue = ntohl (queue);
+
+ S;
+ W;
+ /* NOTREACHED */
+ return 0;
+}
+
+#include <dpdk/api/dpdk.api_test.c>
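The subport defaults above (tb_rate = 1250000000, commented "10GbE") are the 10 Gbit/s line rate expressed in bytes per second, with each traffic class defaulting to the full port rate; a quick sketch of the conversion:

#include <stdio.h>

int
main (void)
{
  /* 10 Gbit/s expressed in bytes/second matches the tb_rate default above. */
  unsigned long long link_bps = 10ULL * 1000 * 1000 * 1000;
  printf ("tb_rate = %llu bytes/s\n", link_bps / 8);  /* prints 1250000000 */
  return 0;
}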
diff --git a/extras/deprecated/dpdk-hqos/dpdk-hqos.diff b/extras/deprecated/dpdk-hqos/dpdk-hqos.diff
new file mode 100644
index 00000000000..1493461a277
--- /dev/null
+++ b/extras/deprecated/dpdk-hqos/dpdk-hqos.diff
@@ -0,0 +1,1726 @@
+diff --git a/src/plugins/dpdk/CMakeLists.txt b/src/plugins/dpdk/CMakeLists.txt
+index e0122d928..af8c80d8f 100644
+--- a/src/plugins/dpdk/CMakeLists.txt
++++ b/src/plugins/dpdk/CMakeLists.txt
+@@ -118,8 +118,6 @@ add_vpp_plugin(dpdk
+ buffer.c
+ main.c
+ thread.c
+-# api/dpdk_api.c
+-# api/dpdk_test.c
+ device/cli.c
+ device/common.c
+ device/device.c
+@@ -127,7 +125,6 @@ add_vpp_plugin(dpdk
+ device/format.c
+ device/init.c
+ device/node.c
+-# hqos/hqos.c
+ ipsec/cli.c
+ ipsec/crypto_node.c
+ ipsec/esp_decrypt.c
+@@ -142,12 +139,6 @@ add_vpp_plugin(dpdk
+ ipsec/esp_decrypt.c
+ ipsec/esp_encrypt.c
+
+-# API_FILES
+-# api/dpdk.api
+-
+-# API_TEST_SOURCES
+-# api/dpdk_test.c
+-
+ INSTALL_HEADERS
+ device/dpdk.h
+ ipsec/ipsec.h
+diff --git a/src/plugins/dpdk/device/cli.c b/src/plugins/dpdk/device/cli.c
+index 416d97360..0f771c6ba 100644
+--- a/src/plugins/dpdk/device/cli.c
++++ b/src/plugins/dpdk/device/cli.c
+@@ -40,61 +40,6 @@
+ */
+
+
+-#if 0
+-static clib_error_t *
+-get_hqos (u32 hw_if_index, u32 subport_id, dpdk_device_t ** xd,
+- dpdk_device_config_t ** devconf)
+-{
+- dpdk_main_t *dm = &dpdk_main;
+- vnet_hw_interface_t *hw;
+- struct rte_eth_dev_info dev_info;
+- struct rte_pci_device *pci_dev;
+- uword *p = 0;
+- clib_error_t *error = NULL;
+-
+-
+- if (hw_if_index == (u32) ~ 0)
+- {
+- error = clib_error_return (0, "please specify valid interface name");
+- goto done;
+- }
+-
+- if (subport_id != 0)
+- {
+- error = clib_error_return (0, "Invalid subport");
+- goto done;
+- }
+-
+- hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- *xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+-
+- rte_eth_dev_info_get ((*xd)->port_id, &dev_info);
+-
+- pci_dev = dpdk_get_pci_device (&dev_info);
+-
+- if (pci_dev)
+- {
+- vlib_pci_addr_t pci_addr;
+-
+- pci_addr.domain = pci_dev->addr.domain;
+- pci_addr.bus = pci_dev->addr.bus;
+- pci_addr.slot = pci_dev->addr.devid;
+- pci_addr.function = pci_dev->addr.function;
+-
+- p =
+- hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+- }
+-
+- if (p)
+- (*devconf) = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+- else
+- (*devconf) = &dm->conf->default_devconf;
+-
+-done:
+- return error;
+-}
+-#endif
+-
+ static clib_error_t *
+ show_dpdk_buffer (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+@@ -408,1308 +353,6 @@ VLIB_CLI_COMMAND (cmd_set_dpdk_if_desc,static) = {
+ };
+ /* *INDENT-ON* */
+
+-#if 0
+-static int
+-dpdk_device_queue_sort (void *a1, void *a2)
+-{
+- dpdk_device_and_queue_t *dq1 = a1;
+- dpdk_device_and_queue_t *dq2 = a2;
+-
+- if (dq1->device > dq2->device)
+- return 1;
+- else if (dq1->device < dq2->device)
+- return -1;
+- else if (dq1->queue_id > dq2->queue_id)
+- return 1;
+- else if (dq1->queue_id < dq2->queue_id)
+- return -1;
+- else
+- return 0;
+-}
+-
+-
+-static clib_error_t *
+-show_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- vlib_thread_main_t *tm = vlib_get_thread_main ();
+- dpdk_main_t *dm = &dpdk_main;
+- dpdk_device_and_queue_t *dq;
+- int cpu;
+-
+- if (tm->n_vlib_mains == 1)
+- vlib_cli_output (vm, "All interfaces are handled by main thread");
+-
+- for (cpu = 0; cpu < vec_len (dm->devices_by_hqos_cpu); cpu++)
+- {
+- if (cpu >= dm->hqos_cpu_first_index &&
+- cpu < (dm->hqos_cpu_first_index + dm->hqos_cpu_count))
+- vlib_cli_output (vm, "Thread %u (%s at lcore %u):", cpu,
+- vlib_worker_threads[cpu].name,
+- vlib_worker_threads[cpu].cpu_id);
+-
+- vec_foreach (dq, dm->devices_by_hqos_cpu[cpu])
+- {
+- u32 hw_if_index = dm->devices[dq->device].hw_if_index;
+- vnet_hw_interface_t *hi =
+- vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- vlib_cli_output (vm, " %v queue %u", hi->name, dq->queue_id);
+- }
+- }
+- return 0;
+-}
+-
+-/*?
+- * This command is used to display the thread and core each
+- * DPDK output interface and HQoS queue is assigned to.
+- *
+- * @cliexpar
+- * Example of how to display the DPDK output interface and HQoS queue placement:
+- * @cliexstart{show dpdk interface hqos placement}
+- * Thread 1 (vpp_hqos-threads_0 at lcore 3):
+- * GigabitEthernet0/8/0 queue 0
+- * Thread 2 (vpp_hqos-threads_1 at lcore 4):
+- * GigabitEthernet0/9/0 queue 0
+- * @cliexend
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_show_dpdk_if_hqos_placement, static) = {
+- .path = "show dpdk interface hqos placement",
+- .short_help = "show dpdk interface hqos placement",
+- .function = show_dpdk_if_hqos_placement,
+-};
+-/* *INDENT-ON* */
+-
+-static clib_error_t *
+-set_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- unformat_input_t _line_input, *line_input = &_line_input;
+- dpdk_main_t *dm = &dpdk_main;
+- dpdk_device_and_queue_t *dq;
+- vnet_hw_interface_t *hw;
+- dpdk_device_t *xd;
+- u32 hw_if_index = (u32) ~ 0;
+- u32 cpu = (u32) ~ 0;
+- int i;
+- clib_error_t *error = NULL;
+-
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat
+- (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+- &hw_if_index))
+- ;
+- else if (unformat (line_input, "thread %d", &cpu))
+- ;
+- else
+- {
+- error = clib_error_return (0, "parse error: '%U'",
+- format_unformat_error, line_input);
+- goto done;
+- }
+- }
+-
+- if (hw_if_index == (u32) ~ 0)
+- return clib_error_return (0, "please specify valid interface name");
+-
+- if (cpu < dm->hqos_cpu_first_index ||
+- cpu >= (dm->hqos_cpu_first_index + dm->hqos_cpu_count))
+- {
+- error = clib_error_return (0, "please specify valid thread id");
+- goto done;
+- }
+-
+- hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+-
+- for (i = 0; i < vec_len (dm->devices_by_hqos_cpu); i++)
+- {
+- vec_foreach (dq, dm->devices_by_hqos_cpu[i])
+- {
+- if (hw_if_index == dm->devices[dq->device].hw_if_index)
+- {
+- if (cpu == i) /* nothing to do */
+- goto done;
+-
+- vec_del1 (dm->devices_by_hqos_cpu[i],
+- dq - dm->devices_by_hqos_cpu[i]);
+- vec_add2 (dm->devices_by_hqos_cpu[cpu], dq, 1);
+- dq->queue_id = 0;
+- dq->device = xd->device_index;
+-
+- vec_sort_with_function (dm->devices_by_hqos_cpu[i],
+- dpdk_device_queue_sort);
+-
+- vec_sort_with_function (dm->devices_by_hqos_cpu[cpu],
+- dpdk_device_queue_sort);
+-
+- goto done;
+- }
+- }
+- }
+-
+- error = clib_error_return (0, "not found");
+-
+-done:
+- unformat_free (line_input);
+-
+- return error;
+-}
+-
+-/*?
+- * This command is used to assign a given DPDK output interface and
+- * HQoS queue to a different thread. This will not create a thread,
+- * so the thread must already exist. Use '<em>/etc/vpp/startup.conf</em>'
+- * for the initial thread creation. See @ref qos_doc for more details.
+- *
+- * @cliexpar
+- * Example of how to display the DPDK output interface and HQoS queue placement:
+- * @cliexstart{show dpdk interface hqos placement}
+- * Thread 1 (vpp_hqos-threads_0 at lcore 3):
+- * GigabitEthernet0/8/0 queue 0
+- * Thread 2 (vpp_hqos-threads_1 at lcore 4):
+- * GigabitEthernet0/9/0 queue 0
+- * @cliexend
+- * Example of how to assign a DPDK output interface and HQoS queue to a thread:
+- * @cliexcmd{set dpdk interface hqos placement GigabitEthernet0/8/0 thread 2}
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_placement, static) = {
+- .path = "set dpdk interface hqos placement",
+- .short_help = "set dpdk interface hqos placement <interface> thread <n>",
+- .function = set_dpdk_if_hqos_placement,
+-};
+-/* *INDENT-ON* */
+-
+-static clib_error_t *
+-set_dpdk_if_hqos_pipe (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- unformat_input_t _line_input, *line_input = &_line_input;
+- dpdk_main_t *dm = &dpdk_main;
+- vnet_hw_interface_t *hw;
+- dpdk_device_t *xd;
+- u32 hw_if_index = (u32) ~ 0;
+- u32 subport_id = (u32) ~ 0;
+- u32 pipe_id = (u32) ~ 0;
+- u32 profile_id = (u32) ~ 0;
+- int rv;
+- clib_error_t *error = NULL;
+-
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat
+- (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+- &hw_if_index))
+- ;
+- else if (unformat (line_input, "subport %d", &subport_id))
+- ;
+- else if (unformat (line_input, "pipe %d", &pipe_id))
+- ;
+- else if (unformat (line_input, "profile %d", &profile_id))
+- ;
+- else
+- {
+- error = clib_error_return (0, "parse error: '%U'",
+- format_unformat_error, line_input);
+- goto done;
+- }
+- }
+-
+- if (hw_if_index == (u32) ~ 0)
+- {
+- error = clib_error_return (0, "please specify valid interface name");
+- goto done;
+- }
+-
+- hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+-
+- rv =
+- rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
+- profile_id);
+- if (rv)
+- {
+- error = clib_error_return (0, "pipe configuration failed");
+- goto done;
+- }
+-
+-done:
+- unformat_free (line_input);
+-
+- return error;
+-}
+-
+-/*?
+- * This command is used to change the profile associate with a HQoS pipe. The
+- * '<em><profile_id></em>' is zero based. Use the command
+- * '<em>show dpdk interface hqos</em>' to display the content of each profile.
+- * See @ref qos_doc for more details.
+- *
+- * @note
+- * Currently there is not an API to create a new HQoS pipe profile. One is
+- * created by default in the code (search for '<em>hqos_pipe_params_default</em>').
+- * Additional profiles can be created in code and code recompiled. Then use this
+- * command to assign it.
+- *
+- * @cliexpar
+- * Example of how to assign a new profile to a HQoS pipe:
+- * @cliexcmd{set dpdk interface hqos pipe GigabitEthernet0/8/0 subport 0 pipe 2 profile 1}
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_pipe, static) =
+-{
+- .path = "set dpdk interface hqos pipe",
+- .short_help = "set dpdk interface hqos pipe <interface> subport <subport_id> pipe <pipe_id> "
+- "profile <profile_id>",
+- .function = set_dpdk_if_hqos_pipe,
+-};
+-/* *INDENT-ON* */
+-
+-static clib_error_t *
+-set_dpdk_if_hqos_subport (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- unformat_input_t _line_input, *line_input = &_line_input;
+- dpdk_main_t *dm = &dpdk_main;
+- dpdk_device_t *xd = NULL;
+- u32 hw_if_index = (u32) ~ 0;
+- u32 subport_id = (u32) ~ 0;
+- struct rte_sched_subport_params p;
+- int rv;
+- clib_error_t *error = NULL;
+- u32 tb_rate = (u32) ~ 0;
+- u32 tb_size = (u32) ~ 0;
+- u32 tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] =
+- { (u32) ~ 0, (u32) ~ 0, (u32) ~ 0, (u32) ~ 0 };
+- u32 tc_period = (u32) ~ 0;
+- dpdk_device_config_t *devconf = NULL;
+-
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat
+- (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+- &hw_if_index))
+- ;
+- else if (unformat (line_input, "subport %d", &subport_id))
+- ;
+- else if (unformat (line_input, "rate %d", &tb_rate))
+- ;
+- else if (unformat (line_input, "bktsize %d", &tb_size))
+- ;
+- else if (unformat (line_input, "tc0 %d", &tc_rate[0]))
+- ;
+- else if (unformat (line_input, "tc1 %d", &tc_rate[1]))
+- ;
+- else if (unformat (line_input, "tc2 %d", &tc_rate[2]))
+- ;
+- else if (unformat (line_input, "tc3 %d", &tc_rate[3]))
+- ;
+- else if (unformat (line_input, "period %d", &tc_period))
+- ;
+- else
+- {
+- error = clib_error_return (0, "parse error: '%U'",
+- format_unformat_error, line_input);
+- goto done;
+- }
+- }
+-
+- error = get_hqos (hw_if_index, subport_id, &xd, &devconf);
+-
+- if (error == NULL)
+- {
+- /* Copy the current values over to local structure. */
+- memcpy (&p, &devconf->hqos.subport[subport_id], sizeof (p));
+-
+- /* Update local structure with input values. */
+- if (tb_rate != (u32) ~ 0)
+- {
+- p.tb_rate = tb_rate;
+- p.tc_rate[0] = tb_rate;
+- p.tc_rate[1] = tb_rate;
+- p.tc_rate[2] = tb_rate;
+- p.tc_rate[3] = tb_rate;
+- }
+- if (tb_size != (u32) ~ 0)
+- {
+- p.tb_size = tb_size;
+- }
+- if (tc_rate[0] != (u32) ~ 0)
+- {
+- p.tc_rate[0] = tc_rate[0];
+- }
+- if (tc_rate[1] != (u32) ~ 0)
+- {
+- p.tc_rate[1] = tc_rate[1];
+- }
+- if (tc_rate[2] != (u32) ~ 0)
+- {
+- p.tc_rate[2] = tc_rate[2];
+- }
+- if (tc_rate[3] != (u32) ~ 0)
+- {
+- p.tc_rate[3] = tc_rate[3];
+- }
+- if (tc_period != (u32) ~ 0)
+- {
+- p.tc_period = tc_period;
+- }
+-
+- /* Apply changes. */
+- rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport_id, &p);
+- if (rv)
+- {
+- error = clib_error_return (0, "subport configuration failed");
+- goto done;
+- }
+- else
+- {
+- /* Successfully applied, so save of the input values. */
+- memcpy (&devconf->hqos.subport[subport_id], &p, sizeof (p));
+- }
+- }
+-
+-done:
+- unformat_free (line_input);
+-
+- return error;
+-}
+-
+-/*?
+- * This command is used to set the subport level parameters such as token
+- * bucket rate (bytes per seconds), token bucket size (bytes), traffic class
+- * rates (bytes per seconds) and token update period (Milliseconds).
+- *
+- * By default, the '<em>rate</em>' is set to 1250000000 bytes/second (10GbE
+- * rate) and each of the four traffic classes is set to 100% of the port rate.
+- * If the '<em>rate</em>' is updated by this command, all four traffic classes
+- * are assigned the same value. Each of the four traffic classes can be updated
+- * individually.
+- *
+- * @cliexpar
+- * Example of how to modify the subport attributes for a 1GbE link:
+- * @cliexcmd{set dpdk interface hqos subport GigabitEthernet0/8/0 subport 0 rate 125000000}
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_subport, static) = {
+- .path = "set dpdk interface hqos subport",
+- .short_help = "set dpdk interface hqos subport <interface> subport <subport_id> "
+- "[rate <n>] [bktsize <n>] [tc0 <n>] [tc1 <n>] [tc2 <n>] [tc3 <n>] "
+- "[period <n>]",
+- .function = set_dpdk_if_hqos_subport,
+-};
+-/* *INDENT-ON* */
+-
+-static clib_error_t *
+-set_dpdk_if_hqos_tctbl (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- unformat_input_t _line_input, *line_input = &_line_input;
+- vlib_thread_main_t *tm = vlib_get_thread_main ();
+- dpdk_main_t *dm = &dpdk_main;
+- vnet_hw_interface_t *hw;
+- dpdk_device_t *xd;
+- u32 hw_if_index = (u32) ~ 0;
+- u32 tc = (u32) ~ 0;
+- u32 queue = (u32) ~ 0;
+- u32 entry = (u32) ~ 0;
+- u32 val, i;
+- clib_error_t *error = NULL;
+-
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat
+- (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+- &hw_if_index))
+- ;
+- else if (unformat (line_input, "entry %d", &entry))
+- ;
+- else if (unformat (line_input, "tc %d", &tc))
+- ;
+- else if (unformat (line_input, "queue %d", &queue))
+- ;
+- else
+- {
+- error = clib_error_return (0, "parse error: '%U'",
+- format_unformat_error, line_input);
+- goto done;
+- }
+- }
+-
+- if (hw_if_index == (u32) ~ 0)
+- {
+- error = clib_error_return (0, "please specify valid interface name");
+- goto done;
+- }
+- if (entry >= 64)
+- {
+- error = clib_error_return (0, "invalid entry");
+- goto done;
+- }
+- if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+- {
+- error = clib_error_return (0, "invalid traffic class");
+- goto done;
+- }
+- if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+- {
+- error = clib_error_return (0, "invalid traffic class queue");
+- goto done;
+- }
+-
+- hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+-
+- /* Detect the set of worker threads */
+- uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+- /* Should never happen, shut up Coverity warning */
+- if (p == 0)
+- {
+- error = clib_error_return (0, "no worker registrations?");
+- goto done;
+- }
+-
+- vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
+- int worker_thread_first = tr->first_index;
+- int worker_thread_count = tr->count;
+-
+- val = tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
+- for (i = 0; i < worker_thread_count; i++)
+- xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val;
+-
+-done:
+- unformat_free (line_input);
+-
+- return error;
+-}
+-
+-/*?
+- * This command is used to set the traffic class translation table. The
+- * traffic class translation table is used to map 64 values (0-63) to one of
+- * four traffic class and one of four HQoS input queue. Use the '<em>show
+- * dpdk interface hqos</em>' command to display the traffic class translation
+- * table. See @ref qos_doc for more details.
+- *
+- * This command has the following parameters:
+- *
+- * - <b><interface></b> - Used to specify the output interface.
+- *
+- * - <b>entry <map_val></b> - Mapped value (0-63) to assign traffic class and queue to.
+- *
+- * - <b>tc <tc_id></b> - Traffic class (0-3) to be used by the provided mapped value.
+- *
+- * - <b>queue <queue_id></b> - HQoS input queue (0-3) to be used by the provided mapped value.
+- *
+- * @cliexpar
+- * Example of how to modify the traffic class translation table:
+- * @cliexcmd{set dpdk interface hqos tctbl GigabitEthernet0/8/0 entry 16 tc 2 queue 2}
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_tctbl, static) = {
+- .path = "set dpdk interface hqos tctbl",
+- .short_help = "set dpdk interface hqos tctbl <interface> entry <map_val> tc <tc_id> queue <queue_id>",
+- .function = set_dpdk_if_hqos_tctbl,
+-};
+-/* *INDENT-ON* */
+-
+-static clib_error_t *
+-set_dpdk_if_hqos_pktfield (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- unformat_input_t _line_input, *line_input = &_line_input;
+- vlib_thread_main_t *tm = vlib_get_thread_main ();
+- dpdk_main_t *dm = &dpdk_main;
+- clib_error_t *error = NULL;
+-
+- /* Device specific data */
+- struct rte_eth_dev_info dev_info;
+- struct rte_pci_device *pci_dev;
+- dpdk_device_config_t *devconf = 0;
+- vnet_hw_interface_t *hw;
+- dpdk_device_t *xd;
+- u32 hw_if_index = (u32) ~ 0;
+-
+- /* Detect the set of worker threads */
+- uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+- /* Should never happen, shut up Coverity warning */
+- if (p == 0)
+- return clib_error_return (0, "no worker registrations?");
+-
+- vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
+- int worker_thread_first = tr->first_index;
+- int worker_thread_count = tr->count;
+-
+- /* Packet field configuration */
+- u64 mask = (u64) ~ 0;
+- u32 id = (u32) ~ 0;
+- u32 offset = (u32) ~ 0;
+-
+- /* HQoS params */
+- u32 n_subports_per_port, n_pipes_per_subport, tctbl_size;
+-
+- u32 i;
+-
+- /* Parse input arguments */
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat
+- (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+- &hw_if_index))
+- ;
+- else if (unformat (line_input, "id subport"))
+- id = 0;
+- else if (unformat (line_input, "id pipe"))
+- id = 1;
+- else if (unformat (line_input, "id tc"))
+- id = 2;
+- else if (unformat (line_input, "id %d", &id))
+- ;
+- else if (unformat (line_input, "offset %d", &offset))
+- ;
+- else if (unformat (line_input, "mask %llx", &mask))
+- ;
+- else
+- {
+- error = clib_error_return (0, "parse error: '%U'",
+- format_unformat_error, line_input);
+- goto done;
+- }
+- }
+-
+- /* Get interface */
+- if (hw_if_index == (u32) ~ 0)
+- {
+- error = clib_error_return (0, "please specify valid interface name");
+- goto done;
+- }
+-
+- hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+-
+- rte_eth_dev_info_get (xd->port_id, &dev_info);
+-
+- pci_dev = dpdk_get_pci_device (&dev_info);
+-
+- if (pci_dev)
+- {
+- vlib_pci_addr_t pci_addr;
+-
+- pci_addr.domain = pci_dev->addr.domain;
+- pci_addr.bus = pci_dev->addr.bus;
+- pci_addr.slot = pci_dev->addr.devid;
+- pci_addr.function = pci_dev->addr.function;
+-
+- p =
+- hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+- }
+-
+- if (p)
+- devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+- else
+- devconf = &dm->conf->default_devconf;
+-
+- if (devconf->hqos_enabled == 0)
+- {
+- vlib_cli_output (vm, "HQoS disabled for this interface");
+- goto done;
+- }
+-
+- n_subports_per_port = devconf->hqos.port.n_subports_per_port;
+- n_pipes_per_subport = devconf->hqos.port.n_pipes_per_subport;
+- tctbl_size = RTE_DIM (devconf->hqos.tc_table);
+-
+- /* Validate packet field configuration: id, offset and mask */
+- if (id >= 3)
+- {
+- error = clib_error_return (0, "invalid packet field id");
+- goto done;
+- }
+-
+- switch (id)
+- {
+- case 0:
+- if (dpdk_hqos_validate_mask (mask, n_subports_per_port) != 0)
+- {
+- error = clib_error_return (0, "invalid subport ID mask "
+- "(n_subports_per_port = %u)",
+- n_subports_per_port);
+- goto done;
+- }
+- break;
+- case 1:
+- if (dpdk_hqos_validate_mask (mask, n_pipes_per_subport) != 0)
+- {
+- error = clib_error_return (0, "invalid pipe ID mask "
+- "(n_pipes_per_subport = %u)",
+- n_pipes_per_subport);
+- goto done;
+- }
+- break;
+- case 2:
+- default:
+- if (dpdk_hqos_validate_mask (mask, tctbl_size) != 0)
+- {
+- error = clib_error_return (0, "invalid TC table index mask "
+- "(TC table size = %u)", tctbl_size);
+- goto done;
+- }
+- }
+-
+- /* Propagate packet field configuration to all workers */
+- for (i = 0; i < worker_thread_count; i++)
+- switch (id)
+- {
+- case 0:
+- xd->hqos_wt[worker_thread_first + i].hqos_field0_slabpos = offset;
+- xd->hqos_wt[worker_thread_first + i].hqos_field0_slabmask = mask;
+- xd->hqos_wt[worker_thread_first + i].hqos_field0_slabshr =
+- count_trailing_zeros (mask);
+- break;
+- case 1:
+- xd->hqos_wt[worker_thread_first + i].hqos_field1_slabpos = offset;
+- xd->hqos_wt[worker_thread_first + i].hqos_field1_slabmask = mask;
+- xd->hqos_wt[worker_thread_first + i].hqos_field1_slabshr =
+- count_trailing_zeros (mask);
+- break;
+- case 2:
+- default:
+- xd->hqos_wt[worker_thread_first + i].hqos_field2_slabpos = offset;
+- xd->hqos_wt[worker_thread_first + i].hqos_field2_slabmask = mask;
+- xd->hqos_wt[worker_thread_first + i].hqos_field2_slabshr =
+- count_trailing_zeros (mask);
+- }
+-
+-done:
+- unformat_free (line_input);
+-
+- return error;
+-}
+-
+-/*?
+- * This command is used to set the packet fields required for classifying the
+- * incoming packet. As a result of classification process, packet field
+- * information will be mapped to 5 tuples (subport, pipe, traffic class, queue,
+- * color) and stored in packet mbuf.
+- *
+- * This command has the following parameters:
+- *
+- * - <b><interface></b> - Used to specify the output interface.
+- *
+- * - <b>id subport|pipe|tc</b> - Classification occurs across three fields.
+- * This parameter indicates which of the three masks are being configured. Legacy
+- * code used 0-2 to represent these three fields, so 0-2 is still accepted.
+- * - <b>subport|0</b> - Currently only one subport is supported, so only
+- * an empty mask is supported for the subport classification.
+- * - <b>pipe|1</b> - Currently, 4096 pipes per subport are supported, so a
+- * 12-bit mask should be configured to map to the 0-4095 pipes.
+- * - <b>tc|2</b> - The translation table (see '<em>set dpdk interface hqos
+- * tctbl</em>' command) maps each value (0-63) into one of the 4 traffic classes
+- * per pipe. A 6-bit mask should be configured to map this field to a traffic class.
+- *
+- * - <b>offset <n></b> - Offset in the packet to apply the 64-bit mask for classification.
+- * The offset should be on an 8-byte boundary (0,8,16,24..).
+- *
+- * - <b>mask <hex-mask></b> - 64-bit mask to apply to packet at the given '<em>offset</em>'.
+- * Bits must be contiguous and should not include '<em>0x</em>'.
+- *
+- * The default values for the '<em>pktfield</em>' assume Ethernet/IPv4/UDP packets with
+- * no VLAN. Adjust based on expected packet format and desired classification field.
+- * - '<em>subport</em>' is always empty (offset 0 mask 0000000000000000)
+- * - By default, '<em>pipe</em>' maps to the UDP payload bits 12 .. 23 (offset 40
+- * mask 0000000fff000000)
+- * - By default, '<em>tc</em>' maps to the DSCP field in IP header (offset 48 mask
+- * 00000000000000fc)
+- *
+- * @cliexpar
+- * Example of how to modify the '<em>pipe</em>' classification filter to match VLAN:
+- * @cliexcmd{set dpdk interface hqos pktfield GigabitEthernet0/8/0 id pipe offset 8 mask 0000000000000FFF}
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_set_dpdk_if_hqos_pktfield, static) = {
+- .path = "set dpdk interface hqos pktfield",
+- .short_help = "set dpdk interface hqos pktfield <interface> id subport|pipe|tc offset <n> "
+- "mask <hex-mask>",
+- .function = set_dpdk_if_hqos_pktfield,
+-};
+-/* *INDENT-ON* */
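For orientation, the pktfield classification described above reduces to a masked 64-bit load per field; a rough sketch of the per-packet extraction (the function and parameter names here are simplified assumptions, and the shift mirrors the count_trailing_zeros (mask) assignment in the command handler):

#include <stdint.h>
#include <string.h>

/* Load the 64-bit slab located 'slabpos' bytes into the packet, mask it,
   then shift right by the number of trailing zeros in the mask.  */
static uint32_t
hqos_classify_field (const uint8_t * pkt, uint32_t slabpos,
                     uint64_t slabmask, uint32_t slabshr)
{
  uint64_t slab;
  memcpy (&slab, pkt + slabpos, sizeof (slab));  /* unaligned-safe copy */
  return (uint32_t) ((slab & slabmask) >> slabshr);
}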
+-
+-static clib_error_t *
+-show_dpdk_if_hqos (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- unformat_input_t _line_input, *line_input = &_line_input;
+- vlib_thread_main_t *tm = vlib_get_thread_main ();
+- dpdk_main_t *dm = &dpdk_main;
+- vnet_hw_interface_t *hw;
+- dpdk_device_t *xd;
+- dpdk_device_config_hqos_t *cfg;
+- dpdk_device_hqos_per_hqos_thread_t *ht;
+- dpdk_device_hqos_per_worker_thread_t *wk;
+- u32 *tctbl;
+- u32 hw_if_index = (u32) ~ 0;
+- u32 profile_id, subport_id, i;
+- struct rte_eth_dev_info dev_info;
+- struct rte_pci_device *pci_dev;
+- dpdk_device_config_t *devconf = 0;
+- vlib_thread_registration_t *tr;
+- uword *p = 0;
+- clib_error_t *error = NULL;
+-
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat
+- (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+- &hw_if_index))
+- ;
+- else
+- {
+- error = clib_error_return (0, "parse error: '%U'",
+- format_unformat_error, line_input);
+- goto done;
+- }
+- }
+-
+- if (hw_if_index == (u32) ~ 0)
+- {
+- error = clib_error_return (0, "please specify interface name!!");
+- goto done;
+- }
+-
+- hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+-
+- rte_eth_dev_info_get (xd->port_id, &dev_info);
+-
+- pci_dev = dpdk_get_pci_device (&dev_info);
+-
+- if (pci_dev)
+- {
+- vlib_pci_addr_t pci_addr;
+-
+- pci_addr.domain = pci_dev->addr.domain;
+- pci_addr.bus = pci_dev->addr.bus;
+- pci_addr.slot = pci_dev->addr.devid;
+- pci_addr.function = pci_dev->addr.function;
+-
+- p =
+- hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+- }
+-
+- if (p)
+- devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+- else
+- devconf = &dm->conf->default_devconf;
+-
+- if (devconf->hqos_enabled == 0)
+- {
+- vlib_cli_output (vm, "HQoS disabled for this interface");
+- goto done;
+- }
+-
+- /* Detect the set of worker threads */
+- p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+-
+- /* Should never happen, shut up Coverity warning */
+- if (p == 0)
+- {
+- error = clib_error_return (0, "no worker registrations?");
+- goto done;
+- }
+-
+- tr = (vlib_thread_registration_t *) p[0];
+-
+- cfg = &devconf->hqos;
+- ht = xd->hqos_ht;
+- wk = &xd->hqos_wt[tr->first_index];
+- tctbl = wk->hqos_tc_table;
+-
+- vlib_cli_output (vm, " Thread:");
+- vlib_cli_output (vm, " Input SWQ size = %u packets", cfg->swq_size);
+- vlib_cli_output (vm, " Enqueue burst size = %u packets",
+- ht->hqos_burst_enq);
+- vlib_cli_output (vm, " Dequeue burst size = %u packets",
+- ht->hqos_burst_deq);
+-
+- vlib_cli_output (vm,
+- " Packet field 0: slab position = %4u, slab bitmask = 0x%016llx (subport)",
+- wk->hqos_field0_slabpos, wk->hqos_field0_slabmask);
+- vlib_cli_output (vm,
+- " Packet field 1: slab position = %4u, slab bitmask = 0x%016llx (pipe)",
+- wk->hqos_field1_slabpos, wk->hqos_field1_slabmask);
+- vlib_cli_output (vm,
+- " Packet field 2: slab position = %4u, slab bitmask = 0x%016llx (tc)",
+- wk->hqos_field2_slabpos, wk->hqos_field2_slabmask);
+- vlib_cli_output (vm,
+- " Packet field 2 tc translation table: ([Mapped Value Range]: tc/queue tc/queue ...)");
+- vlib_cli_output (vm,
+- " [ 0 .. 15]: "
+- "%u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u",
+- tctbl[0] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[0] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[1] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[1] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[2] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[2] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[3] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[3] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[4] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[4] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[5] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[5] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[6] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[6] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[7] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[7] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[8] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[8] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[9] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[9] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[10] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[10] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[11] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[11] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[12] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[12] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[13] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[13] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[14] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[14] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[15] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[15] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+- vlib_cli_output (vm,
+- " [16 .. 31]: "
+- "%u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u",
+- tctbl[16] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[16] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[17] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[17] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[18] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[18] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[19] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[19] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[20] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[20] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[21] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[21] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[22] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[22] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[23] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[23] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[24] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[24] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[25] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[25] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[26] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[26] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[27] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[27] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[28] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[28] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[29] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[29] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[30] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[30] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[31] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[31] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+- vlib_cli_output (vm,
+- " [32 .. 47]: "
+- "%u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u",
+- tctbl[32] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[32] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[33] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[33] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[34] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[34] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[35] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[35] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[36] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[36] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[37] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[37] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[38] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[38] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[39] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[39] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[40] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[40] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[41] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[41] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[42] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[42] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[43] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[43] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[44] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[44] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[45] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[45] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[46] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[46] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[47] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[47] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+- vlib_cli_output (vm,
+- " [48 .. 63]: "
+- "%u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u %u/%u",
+- tctbl[48] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[48] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[49] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[49] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[50] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[50] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[51] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[51] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[52] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[52] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[53] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[53] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[54] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[54] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[55] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[55] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[56] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[56] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[57] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[57] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[58] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[58] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[59] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[59] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[60] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[60] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[61] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[61] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[62] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[62] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[63] / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+- tctbl[63] % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+- vlib_cli_output (vm, " Port:");
+- vlib_cli_output (vm, " Rate = %u bytes/second", cfg->port.rate);
+- vlib_cli_output (vm, " MTU = %u bytes", cfg->port.mtu);
+- vlib_cli_output (vm, " Frame overhead = %u bytes",
+- cfg->port.frame_overhead);
+- vlib_cli_output (vm, " Number of subports = %u",
+- cfg->port.n_subports_per_port);
+- vlib_cli_output (vm, " Number of pipes per subport = %u",
+- cfg->port.n_pipes_per_subport);
+- vlib_cli_output (vm,
+- " Packet queue size: TC0 = %u, TC1 = %u, TC2 = %u, TC3 = %u packets",
+- cfg->port.qsize[0], cfg->port.qsize[1], cfg->port.qsize[2],
+- cfg->port.qsize[3]);
+- vlib_cli_output (vm, " Number of pipe profiles = %u",
+- cfg->port.n_pipe_profiles);
+-
+- for (subport_id = 0; subport_id < vec_len (cfg->subport); subport_id++)
+- {
+- vlib_cli_output (vm, " Subport %u:", subport_id);
+- vlib_cli_output (vm, " Rate = %u bytes/second",
+- cfg->subport[subport_id].tb_rate);
+- vlib_cli_output (vm, " Token bucket size = %u bytes",
+- cfg->subport[subport_id].tb_size);
+- vlib_cli_output (vm,
+- " Traffic class rate: TC0 = %u, TC1 = %u, TC2 = %u, TC3 = %u bytes/second",
+- cfg->subport[subport_id].tc_rate[0],
+- cfg->subport[subport_id].tc_rate[1],
+- cfg->subport[subport_id].tc_rate[2],
+- cfg->subport[subport_id].tc_rate[3]);
+- vlib_cli_output (vm, " TC period = %u milliseconds",
+- cfg->subport[subport_id].tc_period);
+- }
+-
+- for (profile_id = 0; profile_id < vec_len (cfg->pipe); profile_id++)
+- {
+- vlib_cli_output (vm, " Pipe profile %u:", profile_id);
+- vlib_cli_output (vm, " Rate = %u bytes/second",
+- cfg->pipe[profile_id].tb_rate);
+- vlib_cli_output (vm, " Token bucket size = %u bytes",
+- cfg->pipe[profile_id].tb_size);
+- vlib_cli_output (vm,
+- " Traffic class rate: TC0 = %u, TC1 = %u, TC2 = %u, TC3 = %u bytes/second",
+- cfg->pipe[profile_id].tc_rate[0],
+- cfg->pipe[profile_id].tc_rate[1],
+- cfg->pipe[profile_id].tc_rate[2],
+- cfg->pipe[profile_id].tc_rate[3]);
+- vlib_cli_output (vm, " TC period = %u milliseconds",
+- cfg->pipe[profile_id].tc_period);
+-#ifdef RTE_SCHED_SUBPORT_TC_OV
+- vlib_cli_output (vm, " TC3 oversubscription_weight = %u",
+- cfg->pipe[profile_id].tc_ov_weight);
+-#endif
+-
+- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+- {
+- vlib_cli_output (vm,
+- " TC%u WRR weights: Q0 = %u, Q1 = %u, Q2 = %u, Q3 = %u",
+- i, cfg->pipe[profile_id].wrr_weights[i * 4],
+- cfg->pipe[profile_id].wrr_weights[i * 4 + 1],
+- cfg->pipe[profile_id].wrr_weights[i * 4 + 2],
+- cfg->pipe[profile_id].wrr_weights[i * 4 + 3]);
+- }
+- }
+-
+-#ifdef RTE_SCHED_RED
+- vlib_cli_output (vm, " Weighted Random Early Detection (WRED):");
+- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+- {
+- vlib_cli_output (vm, " TC%u min: G = %u, Y = %u, R = %u", i,
+- cfg->port.red_params[i][e_RTE_METER_GREEN].min_th,
+- cfg->port.red_params[i][e_RTE_METER_YELLOW].min_th,
+- cfg->port.red_params[i][e_RTE_METER_RED].min_th);
+-
+- vlib_cli_output (vm, " TC%u max: G = %u, Y = %u, R = %u", i,
+- cfg->port.red_params[i][e_RTE_METER_GREEN].max_th,
+- cfg->port.red_params[i][e_RTE_METER_YELLOW].max_th,
+- cfg->port.red_params[i][e_RTE_METER_RED].max_th);
+-
+- vlib_cli_output (vm,
+- " TC%u inverted probability: G = %u, Y = %u, R = %u",
+- i, cfg->port.red_params[i][e_RTE_METER_GREEN].maxp_inv,
+- cfg->port.red_params[i][e_RTE_METER_YELLOW].maxp_inv,
+- cfg->port.red_params[i][e_RTE_METER_RED].maxp_inv);
+-
+- vlib_cli_output (vm, " TC%u weight: R = %u, Y = %u, R = %u", i,
+- cfg->port.red_params[i][e_RTE_METER_GREEN].wq_log2,
+- cfg->port.red_params[i][e_RTE_METER_YELLOW].wq_log2,
+- cfg->port.red_params[i][e_RTE_METER_RED].wq_log2);
+- }
+-#endif
+-
+-done:
+- unformat_free (line_input);
+-
+- return error;
+-}
+-
+-/*?
+- * This command is used to display details of an output interface's HQoS
+- * settings.
+- *
+- * @cliexpar
+- * Example of how to display HQoS settings for an interface:
+- * @cliexstart{show dpdk interface hqos GigabitEthernet0/8/0}
+- * Thread:
+- * Input SWQ size = 4096 packets
+- * Enqueue burst size = 256 packets
+- * Dequeue burst size = 220 packets
+- * Packet field 0: slab position = 0, slab bitmask = 0x0000000000000000 (subport)
+- * Packet field 1: slab position = 40, slab bitmask = 0x0000000fff000000 (pipe)
+- * Packet field 2: slab position = 8, slab bitmask = 0x00000000000000fc (tc)
+- * Packet field 2 tc translation table: ([Mapped Value Range]: tc/queue tc/queue ...)
+- * [ 0 .. 15]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+- * [16 .. 31]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+- * [32 .. 47]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+- * [48 .. 63]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+- * Port:
+- * Rate = 1250000000 bytes/second
+- * MTU = 1514 bytes
+- * Frame overhead = 24 bytes
+- * Number of subports = 1
+- * Number of pipes per subport = 4096
+- * Packet queue size: TC0 = 64, TC1 = 64, TC2 = 64, TC3 = 64 packets
+- * Number of pipe profiles = 2
+- * Subport 0:
+- * Rate = 1250000000 bytes/second
+- * Token bucket size = 1000000 bytes
+- * Traffic class rate: TC0 = 1250000000, TC1 = 1250000000, TC2 = 1250000000, TC3 = 1250000000 bytes/second
+- * TC period = 10 milliseconds
+- * Pipe profile 0:
+- * Rate = 305175 bytes/second
+- * Token bucket size = 1000000 bytes
+- * Traffic class rate: TC0 = 305175, TC1 = 305175, TC2 = 305175, TC3 = 305175 bytes/second
+- * TC period = 40 milliseconds
+- * TC0 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+- * TC1 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+- * TC2 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+- * TC3 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+- * @cliexend
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_show_dpdk_if_hqos, static) = {
+- .path = "show dpdk interface hqos",
+- .short_help = "show dpdk interface hqos <interface>",
+- .function = show_dpdk_if_hqos,
+-};
+-
+-/* *INDENT-ON* */
+-
+-static clib_error_t *
+-show_dpdk_hqos_queue_stats (vlib_main_t * vm, unformat_input_t * input,
+- vlib_cli_command_t * cmd)
+-{
+- unformat_input_t _line_input, *line_input = &_line_input;
+- clib_error_t *error = NULL;
+-#ifdef RTE_SCHED_COLLECT_STATS
+- dpdk_main_t *dm = &dpdk_main;
+- u32 hw_if_index = (u32) ~ 0;
+- u32 subport = (u32) ~ 0;
+- u32 pipe = (u32) ~ 0;
+- u32 tc = (u32) ~ 0;
+- u32 tc_q = (u32) ~ 0;
+- vnet_hw_interface_t *hw;
+- dpdk_device_t *xd;
+- uword *p = 0;
+- struct rte_eth_dev_info dev_info;
+- struct rte_pci_device *pci_dev;
+- dpdk_device_config_t *devconf = 0;
+- u32 qindex;
+- struct rte_sched_queue_stats stats;
+- u16 qlen;
+-
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat
+- (line_input, "%U", unformat_vnet_hw_interface, dm->vnet_main,
+- &hw_if_index))
+- ;
+-
+- else if (unformat (line_input, "subport %d", &subport))
+- ;
+-
+- else if (unformat (line_input, "pipe %d", &pipe))
+- ;
+-
+- else if (unformat (line_input, "tc %d", &tc))
+- ;
+-
+- else if (unformat (line_input, "tc_q %d", &tc_q))
+- ;
+-
+- else
+- {
+- error = clib_error_return (0, "parse error: '%U'",
+- format_unformat_error, line_input);
+- goto done;
+- }
+- }
+-
+- if (hw_if_index == (u32) ~ 0)
+- {
+- error = clib_error_return (0, "please specify interface name!!");
+- goto done;
+- }
+-
+- hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index);
+- xd = vec_elt_at_index (dm->devices, hw->dev_instance);
+-
+- rte_eth_dev_info_get (xd->port_id, &dev_info);
+-
+- pci_dev = dpdk_get_pci_device (&dev_info);
+-
+- if (pci_dev)
+- { /* bonded interface has no pci info */
+- vlib_pci_addr_t pci_addr;
+-
+- pci_addr.domain = pci_dev->addr.domain;
+- pci_addr.bus = pci_dev->addr.bus;
+- pci_addr.slot = pci_dev->addr.devid;
+- pci_addr.function = pci_dev->addr.function;
+-
+- p =
+- hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+- }
+-
+- if (p)
+- devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
+- else
+- devconf = &dm->conf->default_devconf;
+-
+- if (devconf->hqos_enabled == 0)
+- {
+- vlib_cli_output (vm, "HQoS disabled for this interface");
+- goto done;
+- }
+-
+- /*
+- * Figure out which queue to query. cf rte_sched_port_qindex. (Not sure why
+- * that method isn't made public by DPDK - how _should_ we get the queue ID?)
+- */
+- qindex = subport * devconf->hqos.port.n_pipes_per_subport + pipe;
+- qindex = qindex * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + tc;
+- qindex = qindex * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_q;
+-
+- if (rte_sched_queue_read_stats (xd->hqos_ht->hqos, qindex, &stats, &qlen) !=
+- 0)
+- {
+- error = clib_error_return (0, "failed to read stats");
+- goto done;
+- }
+-
+- vlib_cli_output (vm, "%=24s%=16s", "Stats Parameter", "Value");
+- vlib_cli_output (vm, "%=24s%=16d", "Packets", stats.n_pkts);
+- vlib_cli_output (vm, "%=24s%=16d", "Packets dropped", stats.n_pkts_dropped);
+-#ifdef RTE_SCHED_RED
+- vlib_cli_output (vm, "%=24s%=16d", "Packets dropped (RED)",
+- stats.n_pkts_red_dropped);
+-#endif
+- vlib_cli_output (vm, "%=24s%=16d", "Bytes", stats.n_bytes);
+- vlib_cli_output (vm, "%=24s%=16d", "Bytes dropped", stats.n_bytes_dropped);
+-
+-#else
+-
+- /* Get a line of input */
+- if (!unformat_user (input, unformat_line_input, line_input))
+- return 0;
+-
+- vlib_cli_output (vm, "RTE_SCHED_COLLECT_STATS disabled in DPDK");
+- goto done;
+-
+-#endif
+-
+-done:
+- unformat_free (line_input);
+-
+- return error;
+-}
+-
+-/*?
+- * This command is used to display statistics associated with a HQoS traffic class
+- * queue.
+- *
+- * @note
+- * Statistics collection by the scheduler is disabled by default in DPDK. In order to
+- * turn it on, add the following line to '<em>../vpp/dpdk/Makefile</em>':
+- * - <b>$(call set,RTE_SCHED_COLLECT_STATS,y)</b>
+- *
+- * @cliexpar
+- * Example of how to display statistics of a HQoS traffic class queue:
+- * @cliexstart{show dpdk hqos queue GigabitEthernet0/9/0 subport 0 pipe 3181 tc 0 tc_q 0}
+- * Stats Parameter Value
+- * Packets 140
+- * Packets dropped 0
+- * Bytes 8400
+- * Bytes dropped 0
+- * @cliexend
+-?*/
+-/* *INDENT-OFF* */
+-VLIB_CLI_COMMAND (cmd_show_dpdk_hqos_queue_stats, static) = {
+- .path = "show dpdk hqos queue",
+- .short_help = "show dpdk hqos queue <interface> subport <subport_id> pipe <pipe_id> tc <tc_id> tc_q <queue_id>",
+- .function = show_dpdk_hqos_queue_stats,
+-};
+-/* *INDENT-ON* */
+-#endif
+-
+ static clib_error_t *
+ show_dpdk_version_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+diff --git a/src/plugins/dpdk/device/device.c b/src/plugins/dpdk/device/device.c
+index 323149043..c355edf6a 100644
+--- a/src/plugins/dpdk/device/device.c
++++ b/src/plugins/dpdk/device/device.c
+@@ -180,21 +180,6 @@ static_always_inline
+ queue_id = (queue_id + 1) % xd->tx_q_used;
+ }
+
+-#if 0
+- if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS)) /* HQoS ON */
+- {
+- /* no wrap, transmit in one burst */
+- dpdk_device_hqos_per_worker_thread_t *hqos =
+- &xd->hqos_wt[vm->thread_index];
+-
+- ASSERT (hqos->swq != NULL);
+-
+- dpdk_hqos_metadata_set (hqos, mb, n_left);
+- n_sent = rte_ring_sp_enqueue_burst (hqos->swq, (void **) mb,
+- n_left, 0);
+- }
+- else
+-#endif
+ if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
+ {
+ /* no wrap, transmit in one burst */
+diff --git a/src/plugins/dpdk/device/dpdk.h b/src/plugins/dpdk/device/dpdk.h
+index ab28ac06a..68d9e712b 100644
+--- a/src/plugins/dpdk/device/dpdk.h
++++ b/src/plugins/dpdk/device/dpdk.h
+@@ -119,40 +119,6 @@ typedef enum
+
+ typedef uint16_t dpdk_portid_t;
+
+-typedef struct
+-{
+- /* Required for vec_validate_aligned */
+- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+-
+- struct rte_ring *swq;
+-
+- u64 hqos_field0_slabmask;
+- u32 hqos_field0_slabpos;
+- u32 hqos_field0_slabshr;
+- u64 hqos_field1_slabmask;
+- u32 hqos_field1_slabpos;
+- u32 hqos_field1_slabshr;
+- u64 hqos_field2_slabmask;
+- u32 hqos_field2_slabpos;
+- u32 hqos_field2_slabshr;
+- u32 hqos_tc_table[64];
+-} dpdk_device_hqos_per_worker_thread_t;
+-
+-typedef struct
+-{
+- /* Required for vec_validate_aligned */
+- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+- struct rte_ring **swq;
+- struct rte_mbuf **pkts_enq;
+- struct rte_mbuf **pkts_deq;
+- struct rte_sched_port *hqos;
+- u32 hqos_burst_enq;
+- u32 hqos_burst_deq;
+- u32 pkts_enq_len;
+- u32 swq_pos;
+- u32 flush_count;
+-} dpdk_device_hqos_per_hqos_thread_t;
+-
+ #define foreach_dpdk_device_flags \
+ _( 0, ADMIN_UP, "admin-up") \
+ _( 1, PROMISC, "promisc") \
+@@ -160,7 +126,6 @@ typedef struct
+ _( 3, PMD_INIT_FAIL, "pmd-init-fail") \
+ _( 4, MAYBE_MULTISEG, "maybe-multiseg") \
+ _( 5, HAVE_SUBIF, "subif") \
+- _( 6, HQOS, "hqos") \
+ _( 9, TX_OFFLOAD, "tx-offload") \
+ _(10, INTEL_PHDR_CKSUM, "intel-phdr-cksum") \
+ _(11, RX_FLOW_OFFLOAD, "rx-flow-offload") \
+@@ -235,10 +200,6 @@ typedef struct
+ u32 parked_loop_count;
+ struct rte_flow_error last_flow_error;
+
+- /* HQoS related */
+- dpdk_device_hqos_per_worker_thread_t *hqos_wt;
+- dpdk_device_hqos_per_hqos_thread_t *hqos_ht;
+-
+ /* af_packet instance number */
+ u16 af_packet_instance_num;
+
+@@ -278,39 +239,6 @@ typedef struct
+ #define HQOS_FLUSH_COUNT_THRESHOLD 100000
+ #endif
+
+-typedef struct dpdk_device_config_hqos_t
+-{
+- u32 hqos_thread;
+- u32 hqos_thread_valid;
+-
+- u32 swq_size;
+- u32 burst_enq;
+- u32 burst_deq;
+-
+- u32 pktfield0_slabpos;
+- u32 pktfield1_slabpos;
+- u32 pktfield2_slabpos;
+- u64 pktfield0_slabmask;
+- u64 pktfield1_slabmask;
+- u64 pktfield2_slabmask;
+- u32 tc_table[64];
+-
+- struct rte_sched_port_params port;
+- struct rte_sched_subport_params *subport;
+- struct rte_sched_pipe_params *pipe;
+- uint32_t *pipe_map;
+-} dpdk_device_config_hqos_t;
+-
+-int dpdk_hqos_validate_mask (u64 mask, u32 n);
+-void dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
+- hqos, u32 pipe_profile_id);
+-#if 0
+-void dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos);
+-#endif
+-clib_error_t *dpdk_port_setup_hqos (dpdk_device_t * xd,
+- dpdk_device_config_hqos_t * hqos);
+-void dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
+- struct rte_mbuf **pkts, u32 n_pkts);
+
+ #define foreach_dpdk_device_config_item \
+ _ (num_rx_queues) \
+@@ -333,8 +261,6 @@ typedef struct
+ foreach_dpdk_device_config_item
+ #undef _
+ clib_bitmap_t * workers;
+- u32 hqos_enabled;
+- dpdk_device_config_hqos_t hqos;
+ u8 tso;
+ u8 *devargs;
+
+@@ -397,7 +323,6 @@ typedef struct
+
+ /* Devices */
+ dpdk_device_t *devices;
+- dpdk_device_and_queue_t **devices_by_hqos_cpu;
+ dpdk_per_thread_data_t *per_thread_data;
+
+ /* buffer flags template, configurable to enable/disable tcp / udp cksum */
+@@ -409,10 +334,6 @@ typedef struct
+ */
+ u8 admin_up_down_in_progress;
+
+- /* which cpus are running I/O TX */
+- int hqos_cpu_first_index;
+- int hqos_cpu_count;
+-
+ /* control interval of dpdk link state and stat polling */
+ f64 link_state_poll_interval;
+ f64 stat_poll_interval;
+@@ -502,8 +423,6 @@ format_function_t format_dpdk_tx_offload_caps;
+ vnet_flow_dev_ops_function_t dpdk_flow_ops_fn;
+
+ clib_error_t *unformat_rss_fn (unformat_input_t * input, uword * rss_fn);
+-clib_error_t *unformat_hqos (unformat_input_t * input,
+- dpdk_device_config_hqos_t * hqos);
+
+ struct rte_pci_device *dpdk_get_pci_device (const struct rte_eth_dev_info
+ *info);
+diff --git a/src/plugins/dpdk/device/format.c b/src/plugins/dpdk/device/format.c
+index 20493eb77..8ef46512c 100644
+--- a/src/plugins/dpdk/device/format.c
++++ b/src/plugins/dpdk/device/format.c
+@@ -913,25 +913,6 @@ unformat_rss_fn (unformat_input_t * input, uword * rss_fn)
+ return 0;
+ }
+
+-clib_error_t *
+-unformat_hqos (unformat_input_t * input, dpdk_device_config_hqos_t * hqos)
+-{
+- clib_error_t *error = 0;
+-
+- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+- {
+- if (unformat (input, "hqos-thread %u", &hqos->hqos_thread))
+- hqos->hqos_thread_valid = 1;
+- else
+- {
+- error = clib_error_return (0, "unknown input `%U'",
+- format_unformat_error, input);
+- break;
+- }
+- }
+-
+- return error;
+-}
+
+ /*
+ * fd.io coding-style-patch-verification: OFF
+diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c
+index d0125e939..5d0e5a260 100644
+--- a/src/plugins/dpdk/device/init.c
++++ b/src/plugins/dpdk/device/init.c
+@@ -214,29 +214,9 @@ dpdk_lib_init (dpdk_main_t * dm)
+ dpdk_device_t *xd;
+ vlib_pci_addr_t last_pci_addr;
+ u32 last_pci_addr_port = 0;
+- vlib_thread_registration_t *tr_hqos;
+- uword *p_hqos;
+-
+- u32 next_hqos_cpu = 0;
+ u8 af_packet_instance_num = 0;
+ last_pci_addr.as_u32 = ~0;
+
+- dm->hqos_cpu_first_index = 0;
+- dm->hqos_cpu_count = 0;
+-
+- /* find out which cpus will be used for I/O TX */
+- p_hqos = hash_get_mem (tm->thread_registrations_by_name, "hqos-threads");
+- tr_hqos = p_hqos ? (vlib_thread_registration_t *) p_hqos[0] : 0;
+-
+- if (tr_hqos && tr_hqos->count > 0)
+- {
+- dm->hqos_cpu_first_index = tr_hqos->first_index;
+- dm->hqos_cpu_count = tr_hqos->count;
+- }
+-
+- vec_validate_aligned (dm->devices_by_hqos_cpu, tm->n_vlib_mains - 1,
+- CLIB_CACHE_LINE_BYTES);
+-
+ nports = rte_eth_dev_count_avail ();
+
+ if (nports < 1)
+@@ -596,38 +576,6 @@ dpdk_lib_init (dpdk_main_t * dm)
+ /* assign interface to input thread */
+ int q;
+
+- if (devconf->hqos_enabled)
+- {
+- xd->flags |= DPDK_DEVICE_FLAG_HQOS;
+-
+- int cpu;
+- if (devconf->hqos.hqos_thread_valid)
+- {
+- if (devconf->hqos.hqos_thread >= dm->hqos_cpu_count)
+- return clib_error_return (0, "invalid HQoS thread index");
+-
+- cpu = dm->hqos_cpu_first_index + devconf->hqos.hqos_thread;
+- }
+- else
+- {
+- if (dm->hqos_cpu_count == 0)
+- return clib_error_return (0, "no HQoS threads available");
+-
+- cpu = dm->hqos_cpu_first_index + next_hqos_cpu;
+-
+- next_hqos_cpu++;
+- if (next_hqos_cpu == dm->hqos_cpu_count)
+- next_hqos_cpu = 0;
+-
+- devconf->hqos.hqos_thread_valid = 1;
+- devconf->hqos.hqos_thread = cpu;
+- }
+-
+- dpdk_device_and_queue_t *dq;
+- vec_add2 (dm->devices_by_hqos_cpu[cpu], dq, 1);
+- dq->device = xd->device_index;
+- dq->queue_id = 0;
+- }
+
+ error = ethernet_register_interface
+ (dm->vnet_main, dpdk_device_class.index, xd->device_index,
+@@ -770,14 +718,6 @@ dpdk_lib_init (dpdk_main_t * dm)
+ format_dpdk_device_name, i,
+ format_dpdk_device_errors, xd);
+
+- if (devconf->hqos_enabled)
+- {
+- clib_error_t *rv;
+- rv = dpdk_port_setup_hqos (xd, &devconf->hqos);
+- if (rv)
+- return rv;
+- }
+-
+ /*
+ * A note on Cisco VIC (PMD_ENIC) and VLAN:
+ *
+@@ -1060,11 +1000,7 @@ dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr,
+ }
+
+ devconf->pci_addr.as_u32 = pci_addr.as_u32;
+- devconf->hqos_enabled = 0;
+ devconf->tso = DPDK_DEVICE_TSO_DEFAULT;
+-#if 0
+- dpdk_device_config_hqos_default (&devconf->hqos);
+-#endif
+
+ if (!input)
+ return 0;
+@@ -1097,19 +1033,6 @@ dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr,
+ devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_OFF;
+ else if (unformat (input, "vlan-strip-offload on"))
+ devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_ON;
+- else
+- if (unformat
+- (input, "hqos %U", unformat_vlib_cli_sub_input, &sub_input))
+- {
+- devconf->hqos_enabled = 1;
+- error = unformat_hqos (&sub_input, &devconf->hqos);
+- if (error)
+- break;
+- }
+- else if (unformat (input, "hqos"))
+- {
+- devconf->hqos_enabled = 1;
+- }
+ else if (unformat (input, "tso on"))
+ {
+ devconf->tso = DPDK_DEVICE_TSO_ON;
diff --git a/extras/deprecated/dpdk-hqos/hqos.c b/extras/deprecated/dpdk-hqos/hqos.c
new file mode 100644
index 00000000000..1a8dd6d98fe
--- /dev/null
+++ b/extras/deprecated/dpdk-hqos/hqos.c
@@ -0,0 +1,771 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/bitmap.h>
+
+#include <vnet/vnet.h>
+#include <vnet/ethernet/ethernet.h>
+#include <dpdk/device/dpdk.h>
+
+#include <vlib/pci/pci.h>
+#include <vlibmemory/api.h>
+#include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */
+
+#define vl_typedefs /* define message structures */
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_typedefs
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vlibmemory/vl_memory_api_h.h>
+#undef vl_printfun
+
+#include <dpdk/device/dpdk_priv.h>
+
+/***
+ *
+ * HQoS default configuration values
+ *
+ ***/
+
+static dpdk_device_config_hqos_t hqos_params_default = {
+ .hqos_thread_valid = 0,
+
+ .swq_size = 4096,
+ .burst_enq = 256,
+ .burst_deq = 220,
+
+ /*
+ * Packet field to identify the subport.
+ *
+ * Default value: Since only one subport is defined by default (see below:
+ * n_subports_per_port = 1), the subport ID is hardcoded to 0.
+ */
+ .pktfield0_slabpos = 0,
+ .pktfield0_slabmask = 0,
+
+ /*
+ * Packet field to identify the pipe.
+ *
+ * Default value: Assuming Ethernet/IPv4/UDP packets, UDP payload bits 12 .. 23
+ */
+ .pktfield1_slabpos = 40,
+ .pktfield1_slabmask = 0x0000000FFF000000LLU,
+
+ /* Packet field used as index into TC translation table to identify the traffic
+ * class and queue.
+ *
+ * Default value: Assuming Ethernet/IPv4 packets, IPv4 DSCP field
+ */
+ .pktfield2_slabpos = 8,
+ .pktfield2_slabmask = 0x00000000000000FCLLU,
+ .tc_table = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ },
+
+ /* port */
+ .port = {
+ .name = NULL, /* Set at init */
+ .socket = 0, /* Set at init */
+ .rate = 1250000000, /* Assuming 10GbE port */
+ .mtu = 14 + 1500, /* Assuming Ethernet/IPv4 pkt (Ethernet FCS not included) */
+ .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
+ .n_subports_per_port = 1,
+ .n_pipes_per_subport = 4096,
+ .qsize = {64, 64, 64, 64},
+ .pipe_profiles = NULL, /* Set at config */
+ .n_pipe_profiles = 1,
+
+#ifdef RTE_SCHED_RED
+ .red_params = {
+ /* Traffic Class 0 Colors Green / Yellow / Red */
+ [0][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [0][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [0][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+
+ /* Traffic Class 1 - Colors Green / Yellow / Red */
+ [1][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [1][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [1][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+
+ /* Traffic Class 2 - Colors Green / Yellow / Red */
+ [2][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [2][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [2][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+
+ /* Traffic Class 3 - Colors Green / Yellow / Red */
+ [3][0] = {.min_th = 48,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [3][1] = {.min_th = 40,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9},
+ [3][2] = {.min_th = 32,.max_th = 64,.maxp_inv =
+ 10,.wq_log2 = 9}
+ },
+#endif /* RTE_SCHED_RED */
+ },
+};
+
+static struct rte_sched_subport_params hqos_subport_params_default = {
+ .tb_rate = 1250000000, /* 10GbE line rate (measured in bytes/second) */
+ .tb_size = 1000000,
+ .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+ .tc_period = 10,
+};
+
+static struct rte_sched_pipe_params hqos_pipe_params_default = {
+ .tb_rate = 305175, /* 10GbE line rate divided by 4K pipes */
+ .tb_size = 1000000,
+ .tc_rate = {305175, 305175, 305175, 305175},
+ .tc_period = 40,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ .tc_ov_weight = 1,
+#endif
+ .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+/***
+ *
+ * HQoS configuration
+ *
+ ***/
+
+int
+dpdk_hqos_validate_mask (u64 mask, u32 n)
+{
+ int count = __builtin_popcountll (mask);
+ int pos_lead = sizeof (u64) * 8 - count_leading_zeros (mask);
+ int pos_trail = count_trailing_zeros (mask);
+ int count_expected = __builtin_popcount (n - 1);
+
+ /* Handle the exceptions */
+ if (n == 0)
+ return -1; /* Error */
+
+ if ((mask == 0) && (n == 1))
+ return 0; /* OK */
+
+ if (((mask == 0) && (n != 1)) || ((mask != 0) && (n == 1)))
+ return -2; /* Error */
+
+ /* Check that mask is contiguous */
+ if ((pos_lead - pos_trail) != count)
+ return -3; /* Error */
+
+ /* Check that mask contains the expected number of bits set */
+ if (count != count_expected)
+ return -4; /* Error */
+
+ return 0; /* OK */
+}
+
+void
+dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
+ hqos, u32 pipe_profile_id)
+{
+ memcpy (&hqos->pipe[pipe_profile_id], &hqos_pipe_params_default,
+ sizeof (hqos_pipe_params_default));
+}
+
+void
+dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos)
+{
+ struct rte_sched_subport_params *subport_params;
+ struct rte_sched_pipe_params *pipe_params;
+ u32 *pipe_map;
+ u32 i;
+
+ memcpy (hqos, &hqos_params_default, sizeof (hqos_params_default));
+
+ /* pipe */
+ vec_add2 (hqos->pipe, pipe_params, hqos->port.n_pipe_profiles);
+
+ for (i = 0; i < vec_len (hqos->pipe); i++)
+ memcpy (&pipe_params[i],
+ &hqos_pipe_params_default, sizeof (hqos_pipe_params_default));
+
+ hqos->port.pipe_profiles = hqos->pipe;
+
+ /* subport */
+ vec_add2 (hqos->subport, subport_params, hqos->port.n_subports_per_port);
+
+ for (i = 0; i < vec_len (hqos->subport); i++)
+ memcpy (&subport_params[i],
+ &hqos_subport_params_default,
+ sizeof (hqos_subport_params_default));
+
+ /* pipe profile */
+ vec_add2 (hqos->pipe_map,
+ pipe_map,
+ hqos->port.n_subports_per_port * hqos->port.n_pipes_per_subport);
+
+ for (i = 0; i < vec_len (hqos->pipe_map); i++)
+ pipe_map[i] = 0;
+}
+
+/***
+ *
+ * HQoS init
+ *
+ ***/
+
+clib_error_t *
+dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
+{
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ char name[32];
+ u32 subport_id, i;
+ int rv;
+
+ /* Detect the set of worker threads */
+ int worker_thread_first = 0;
+ int worker_thread_count = 0;
+
+ uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ vlib_thread_registration_t *tr =
+ p ? (vlib_thread_registration_t *) p[0] : 0;
+
+ if (tr && tr->count > 0)
+ {
+ worker_thread_first = tr->first_index;
+ worker_thread_count = tr->count;
+ }
+
+ /* Allocate the per-thread device data array */
+ vec_validate_aligned (xd->hqos_wt, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+ clib_memset (xd->hqos_wt, 0, tm->n_vlib_mains * sizeof (xd->hqos_wt[0]));
+
+ vec_validate_aligned (xd->hqos_ht, 0, CLIB_CACHE_LINE_BYTES);
+ clib_memset (xd->hqos_ht, 0, sizeof (xd->hqos_ht[0]));
+
+ /* Allocate space for one SWQ per worker thread in the I/O TX thread data structure */
+ vec_validate (xd->hqos_ht->swq, worker_thread_count);
+
+ /* SWQ */
+ for (i = 0; i < worker_thread_count + 1; i++)
+ {
+ u32 swq_flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
+
+ snprintf (name, sizeof (name), "SWQ-worker%u-to-device%u", i,
+ xd->port_id);
+ xd->hqos_ht->swq[i] =
+ rte_ring_create (name, hqos->swq_size, xd->cpu_socket, swq_flags);
+ if (xd->hqos_ht->swq[i] == NULL)
+ return clib_error_return (0,
+ "SWQ-worker%u-to-device%u: rte_ring_create err",
+ i, xd->port_id);
+ }
+
+ /*
+ * HQoS
+ */
+
+ /* HQoS port */
+ snprintf (name, sizeof (name), "HQoS%u", xd->port_id);
+ hqos->port.name = strdup (name);
+ if (hqos->port.name == NULL)
+ return clib_error_return (0, "HQoS%u: strdup err", xd->port_id);
+
+ hqos->port.socket = rte_eth_dev_socket_id (xd->port_id);
+ if (hqos->port.socket == SOCKET_ID_ANY)
+ hqos->port.socket = 0;
+
+ xd->hqos_ht->hqos = rte_sched_port_config (&hqos->port);
+ if (xd->hqos_ht->hqos == NULL)
+ return clib_error_return (0, "HQoS%u: rte_sched_port_config err",
+ xd->port_id);
+
+ /* HQoS subport */
+ for (subport_id = 0; subport_id < hqos->port.n_subports_per_port;
+ subport_id++)
+ {
+ u32 pipe_id;
+
+ rv =
+ rte_sched_subport_config (xd->hqos_ht->hqos, subport_id,
+ &hqos->subport[subport_id]);
+ if (rv)
+ return clib_error_return (0,
+ "HQoS%u subport %u: rte_sched_subport_config err (%d)",
+ xd->port_id, subport_id, rv);
+
+ /* HQoS pipe */
+ for (pipe_id = 0; pipe_id < hqos->port.n_pipes_per_subport; pipe_id++)
+ {
+ u32 pos = subport_id * hqos->port.n_pipes_per_subport + pipe_id;
+ u32 profile_id = hqos->pipe_map[pos];
+
+ rv =
+ rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
+ profile_id);
+ if (rv)
+ return clib_error_return (0,
+ "HQoS%u subport %u pipe %u: rte_sched_pipe_config err (%d)",
+ xd->port_id, subport_id, pipe_id, rv);
+ }
+ }
+
+ /* Set up per-thread device data for the I/O TX thread */
+ xd->hqos_ht->hqos_burst_enq = hqos->burst_enq;
+ xd->hqos_ht->hqos_burst_deq = hqos->burst_deq;
+ vec_validate (xd->hqos_ht->pkts_enq, 2 * hqos->burst_enq - 1);
+ vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
+ xd->hqos_ht->pkts_enq_len = 0;
+ xd->hqos_ht->swq_pos = 0;
+ xd->hqos_ht->flush_count = 0;
+
+ /* Set up per-thread device data for each worker thread */
+ for (i = 0; i < worker_thread_count + 1; i++)
+ {
+ u32 tid;
+ if (i)
+ tid = worker_thread_first + (i - 1);
+ else
+ tid = i;
+
+ xd->hqos_wt[tid].swq = xd->hqos_ht->swq[i];
+ xd->hqos_wt[tid].hqos_field0_slabpos = hqos->pktfield0_slabpos;
+ xd->hqos_wt[tid].hqos_field0_slabmask = hqos->pktfield0_slabmask;
+ xd->hqos_wt[tid].hqos_field0_slabshr =
+ count_trailing_zeros (hqos->pktfield0_slabmask);
+ xd->hqos_wt[tid].hqos_field1_slabpos = hqos->pktfield1_slabpos;
+ xd->hqos_wt[tid].hqos_field1_slabmask = hqos->pktfield1_slabmask;
+ xd->hqos_wt[tid].hqos_field1_slabshr =
+ count_trailing_zeros (hqos->pktfield1_slabmask);
+ xd->hqos_wt[tid].hqos_field2_slabpos = hqos->pktfield2_slabpos;
+ xd->hqos_wt[tid].hqos_field2_slabmask = hqos->pktfield2_slabmask;
+ xd->hqos_wt[tid].hqos_field2_slabshr =
+ count_trailing_zeros (hqos->pktfield2_slabmask);
+ memcpy (xd->hqos_wt[tid].hqos_tc_table, hqos->tc_table,
+ sizeof (hqos->tc_table));
+ }
+
+ return 0;
+}
+
+/***
+ *
+ * HQoS run-time
+ *
+ ***/
+/*
+ * dpdk_hqos_thread - Contains the main loop of an HQoS thread.
+ *
+ * w - Information for the current thread
+ */
+static_always_inline void
+dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u32 thread_index = vm->thread_index;
+ u32 dev_pos;
+
+ dev_pos = 0;
+ while (1)
+ {
+ vlib_worker_thread_barrier_check ();
+
+ u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
+ if (dev_pos >= n_devs)
+ dev_pos = 0;
+
+ dpdk_device_and_queue_t *dq =
+ vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);
+
+ dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
+ u32 device_index = xd->port_id;
+ u16 queue_id = dq->queue_id;
+
+ struct rte_mbuf **pkts_enq = hqos->pkts_enq;
+ u32 pkts_enq_len = hqos->pkts_enq_len;
+ u32 swq_pos = hqos->swq_pos;
+ u32 n_swq = vec_len (hqos->swq), i;
+ u32 flush_count = hqos->flush_count;
+
+ for (i = 0; i < n_swq; i++)
+ {
+ /* Get current SWQ for this device */
+ struct rte_ring *swq = hqos->swq[swq_pos];
+
+ /* Read SWQ burst to packet buffer of this device */
+ pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
+ (void **)
+ &pkts_enq[pkts_enq_len],
+ hqos->hqos_burst_enq, 0);
+
+ /* Get next SWQ for this device */
+ swq_pos++;
+ if (swq_pos >= n_swq)
+ swq_pos = 0;
+ hqos->swq_pos = swq_pos;
+
+ /* HWQ TX enqueue when burst available */
+ if (pkts_enq_len >= hqos->hqos_burst_enq)
+ {
+ u32 n_pkts = rte_eth_tx_burst (device_index,
+ (uint16_t) queue_id,
+ pkts_enq,
+ (uint16_t) pkts_enq_len);
+
+ for (; n_pkts < pkts_enq_len; n_pkts++)
+ rte_pktmbuf_free (pkts_enq[n_pkts]);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+ if (pkts_enq_len)
+ {
+ flush_count++;
+ if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+ {
+ rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+ }
+ hqos->pkts_enq_len = pkts_enq_len;
+ hqos->flush_count = flush_count;
+
+ /* Advance to next device */
+ dev_pos++;
+ }
+}
+
+static_always_inline void
+dpdk_hqos_thread_internal (vlib_main_t * vm)
+{
+ dpdk_main_t *dm = &dpdk_main;
+ u32 thread_index = vm->thread_index;
+ u32 dev_pos;
+
+ dev_pos = 0;
+ while (1)
+ {
+ vlib_worker_thread_barrier_check ();
+
+ u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
+ if (PREDICT_FALSE (n_devs == 0))
+ {
+ dev_pos = 0;
+ continue;
+ }
+ if (dev_pos >= n_devs)
+ dev_pos = 0;
+
+ dpdk_device_and_queue_t *dq =
+ vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);
+
+ dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
+ u32 device_index = xd->port_id;
+ u16 queue_id = dq->queue_id;
+
+ struct rte_mbuf **pkts_enq = hqos->pkts_enq;
+ struct rte_mbuf **pkts_deq = hqos->pkts_deq;
+ u32 pkts_enq_len = hqos->pkts_enq_len;
+ u32 swq_pos = hqos->swq_pos;
+ u32 n_swq = vec_len (hqos->swq), i;
+ u32 flush_count = hqos->flush_count;
+
+ /*
+ * SWQ dequeue and HQoS enqueue for current device
+ */
+ for (i = 0; i < n_swq; i++)
+ {
+ /* Get current SWQ for this device */
+ struct rte_ring *swq = hqos->swq[swq_pos];
+
+ /* Read SWQ burst to packet buffer of this device */
+ pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
+ (void **)
+ &pkts_enq[pkts_enq_len],
+ hqos->hqos_burst_enq, 0);
+
+ /* Get next SWQ for this device */
+ swq_pos++;
+ if (swq_pos >= n_swq)
+ swq_pos = 0;
+ hqos->swq_pos = swq_pos;
+
+ /* HQoS enqueue when burst available */
+ if (pkts_enq_len >= hqos->hqos_burst_enq)
+ {
+ rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+ if (pkts_enq_len)
+ {
+ flush_count++;
+ if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+ {
+ rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+ }
+ hqos->pkts_enq_len = pkts_enq_len;
+ hqos->flush_count = flush_count;
+
+ /*
+ * HQoS dequeue and HWQ TX enqueue for current device
+ */
+ {
+ u32 pkts_deq_len, n_pkts;
+
+ pkts_deq_len = rte_sched_port_dequeue (hqos->hqos,
+ pkts_deq,
+ hqos->hqos_burst_deq);
+
+ for (n_pkts = 0; n_pkts < pkts_deq_len;)
+ n_pkts += rte_eth_tx_burst (device_index,
+ (uint16_t) queue_id,
+ &pkts_deq[n_pkts],
+ (uint16_t) (pkts_deq_len - n_pkts));
+ }
+
+ /* Advance to next device */
+ dev_pos++;
+ }
+}
+
+void
+dpdk_hqos_thread (vlib_worker_thread_t * w)
+{
+ vlib_main_t *vm;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ dpdk_main_t *dm = &dpdk_main;
+
+ vm = vlib_get_main ();
+
+ ASSERT (vm->thread_index == vlib_get_thread_index ());
+
+ clib_time_init (&vm->clib_time);
+ clib_mem_set_heap (w->thread_mheap);
+
+ /* Wait until the dpdk init sequence is complete */
+ while (tm->worker_thread_release == 0)
+ vlib_worker_thread_barrier_check ();
+
+ if (vec_len (dm->devices_by_hqos_cpu[vm->thread_index]) == 0)
+ return
+ clib_error
+ ("current I/O TX thread does not have any devices assigned to it");
+
+ if (DPDK_HQOS_DBG_BYPASS)
+ dpdk_hqos_thread_internal_hqos_dbg_bypass (vm);
+ else
+ dpdk_hqos_thread_internal (vm);
+}
+
+void
+dpdk_hqos_thread_fn (void *arg)
+{
+ vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
+ vlib_worker_thread_init (w);
+ dpdk_hqos_thread (w);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_THREAD (hqos_thread_reg, static) =
+{
+ .name = "hqos-threads",
+ .short_name = "hqos-threads",
+ .function = dpdk_hqos_thread_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * HQoS run-time code to be called by the worker threads
+ */
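+/*
+ * BITFIELD reads the 8-byte slab at byte offset slab_pos within the packet,
+ * converts it from network to host byte order, applies slab_mask and shifts
+ * the result right by slab_shr, yielding the configured packet field
+ * (subport, pipe or DSCP index).
+ */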
+#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr) \
+({ \
+ u64 slab = *((u64 *) &byte_array[slab_pos]); \
+ u64 val = (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \
+ val; \
+})
+
+#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, traffic_class, queue, color) \
+ ((((u64) (queue)) & 0x3) | \
+ ((((u64) (traffic_class)) & 0x3) << 2) | \
+ ((((u64) (color)) & 0x3) << 4) | \
+ ((((u64) (subport)) & 0xFFFF) << 16) | \
+ ((((u64) (pipe)) & 0xFFFFFFFF) << 32))
+
+void
+dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
+ struct rte_mbuf **pkts, u32 n_pkts)
+{
+ u32 i;
+
+ for (i = 0; i < (n_pkts & (~0x3)); i += 4)
+ {
+ struct rte_mbuf *pkt0 = pkts[i];
+ struct rte_mbuf *pkt1 = pkts[i + 1];
+ struct rte_mbuf *pkt2 = pkts[i + 2];
+ struct rte_mbuf *pkt3 = pkts[i + 3];
+
+ u8 *pkt0_data = rte_pktmbuf_mtod (pkt0, u8 *);
+ u8 *pkt1_data = rte_pktmbuf_mtod (pkt1, u8 *);
+ u8 *pkt2_data = rte_pktmbuf_mtod (pkt2, u8 *);
+ u8 *pkt3_data = rte_pktmbuf_mtod (pkt3, u8 *);
+
+ u64 pkt0_subport = BITFIELD (pkt0_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt0_pipe = BITFIELD (pkt0_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt0_dscp = BITFIELD (pkt0_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt0_tc = hqos->hqos_tc_table[pkt0_dscp & 0x3F] >> 2;
+ u32 pkt0_tc_q = hqos->hqos_tc_table[pkt0_dscp & 0x3F] & 0x3;
+
+ u64 pkt1_subport = BITFIELD (pkt1_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt1_pipe = BITFIELD (pkt1_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt1_dscp = BITFIELD (pkt1_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt1_tc = hqos->hqos_tc_table[pkt1_dscp & 0x3F] >> 2;
+ u32 pkt1_tc_q = hqos->hqos_tc_table[pkt1_dscp & 0x3F] & 0x3;
+
+ u64 pkt2_subport = BITFIELD (pkt2_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt2_pipe = BITFIELD (pkt2_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt2_dscp = BITFIELD (pkt2_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt2_tc = hqos->hqos_tc_table[pkt2_dscp & 0x3F] >> 2;
+ u32 pkt2_tc_q = hqos->hqos_tc_table[pkt2_dscp & 0x3F] & 0x3;
+
+ u64 pkt3_subport = BITFIELD (pkt3_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt3_pipe = BITFIELD (pkt3_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt3_dscp = BITFIELD (pkt3_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt3_tc = hqos->hqos_tc_table[pkt3_dscp & 0x3F] >> 2;
+ u32 pkt3_tc_q = hqos->hqos_tc_table[pkt3_dscp & 0x3F] & 0x3;
+
+ u64 pkt0_sched = RTE_SCHED_PORT_HIERARCHY (pkt0_subport,
+ pkt0_pipe,
+ pkt0_tc,
+ pkt0_tc_q,
+ 0);
+ u64 pkt1_sched = RTE_SCHED_PORT_HIERARCHY (pkt1_subport,
+ pkt1_pipe,
+ pkt1_tc,
+ pkt1_tc_q,
+ 0);
+ u64 pkt2_sched = RTE_SCHED_PORT_HIERARCHY (pkt2_subport,
+ pkt2_pipe,
+ pkt2_tc,
+ pkt2_tc_q,
+ 0);
+ u64 pkt3_sched = RTE_SCHED_PORT_HIERARCHY (pkt3_subport,
+ pkt3_pipe,
+ pkt3_tc,
+ pkt3_tc_q,
+ 0);
+
+ pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
+ pkt0->hash.sched.hi = pkt0_sched >> 32;
+ pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
+ pkt1->hash.sched.hi = pkt1_sched >> 32;
+ pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
+ pkt2->hash.sched.hi = pkt2_sched >> 32;
+ pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
+ pkt3->hash.sched.hi = pkt3_sched >> 32;
+ }
+
+ for (; i < n_pkts; i++)
+ {
+ struct rte_mbuf *pkt = pkts[i];
+
+ u8 *pkt_data = rte_pktmbuf_mtod (pkt, u8 *);
+
+ u64 pkt_subport = BITFIELD (pkt_data, hqos->hqos_field0_slabpos,
+ hqos->hqos_field0_slabmask,
+ hqos->hqos_field0_slabshr);
+ u64 pkt_pipe = BITFIELD (pkt_data, hqos->hqos_field1_slabpos,
+ hqos->hqos_field1_slabmask,
+ hqos->hqos_field1_slabshr);
+ u64 pkt_dscp = BITFIELD (pkt_data, hqos->hqos_field2_slabpos,
+ hqos->hqos_field2_slabmask,
+ hqos->hqos_field2_slabshr);
+ u32 pkt_tc = hqos->hqos_tc_table[pkt_dscp & 0x3F] >> 2;
+ u32 pkt_tc_q = hqos->hqos_tc_table[pkt_dscp & 0x3F] & 0x3;
+
+ u64 pkt_sched = RTE_SCHED_PORT_HIERARCHY (pkt_subport,
+ pkt_pipe,
+ pkt_tc,
+ pkt_tc_q,
+ 0);
+
+ pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
+ pkt->hash.sched.hi = pkt_sched >> 32;
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/dpdk-hqos/qos_doc.md b/extras/deprecated/dpdk-hqos/qos_doc.md
new file mode 100644
index 00000000000..fe3bb1bcd4b
--- /dev/null
+++ b/extras/deprecated/dpdk-hqos/qos_doc.md
@@ -0,0 +1,411 @@
+# QoS Hierarchical Scheduler {#qos_doc}
+
+The Quality-of-Service (QoS) scheduler performs egress traffic management by
+prioritizing the transmission of packets belonging to different service types
+and subscribers according to their Service Level Agreements (SLAs). The QoS
+scheduler can be enabled on one or more NIC output interfaces as required.
+
+
+## Overview
+
+The QoS scheduler supports a number of scheduling and shaping levels which
+together form a hierarchical tree. The first level in the hierarchy is the
+port (i.e. the physical interface), which constitutes the root node of the
+tree. The next level is the subport, which represents a group of
+users/subscribers. An individual user/subscriber is represented by a pipe at
+the next level. Each user can carry several traffic types with different loss
+rate, jitter and latency requirements; these are represented at the next
+level as traffic classes. The last level contains the queues, which are
+grouped together to hold the packets of a specific traffic class.
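+
+Under the default configuration described in this document, the hierarchy
+looks as follows:
+
+```
+Port (physical interface)
+ +-- Subport 0                      (1 subport per port)
+      +-- Pipe 0 .. Pipe 4095       (1 pipe per user/subscriber)
+           +-- Traffic class 0 .. 3 (4 traffic classes per pipe)
+                +-- Queue 0 .. 3    (4 queues per traffic class)
+```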
+
+The QoS scheduler implementation requires flow classification, enqueue and
+dequeue operations. Flow classification is a mandatory stage for HQoS:
+incoming packets are classified by mapping packet field information to a
+5-tuple (HQoS subport, pipe, traffic class, queue within traffic class, and
+color), which is stored in the mbuf sched field. The enqueue operation uses
+this information to select the queue in which to store the packet; if that
+queue is full, QoS drops the packet. The dequeue operation schedules packets
+based on their length and the available credits, and hands the scheduled
+packets over to the output interface.
+
+For more information on the QoS scheduler, please refer to the DPDK
+Programmer's Guide:
+http://dpdk.org/doc/guides/prog_guide/qos_framework.html
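+
+The classification result can be illustrated with a small C sketch that
+mirrors the RTE_SCHED_PORT_HIERARCHY macro defined in hqos.c of this patch
+(the helper name below is illustrative only):
+
+```
+#include <stdint.h>
+
+/* Pack (subport, pipe, traffic class, queue, color) into the 64-bit
+ * scheduler metadata word: queue in bits 0..1, traffic class in bits 2..3,
+ * color in bits 4..5, subport in bits 16..31 and pipe in bits 32..63. */
+static inline uint64_t
+hqos_pack_sched (uint64_t subport, uint64_t pipe, uint64_t tc,
+                 uint64_t queue, uint64_t color)
+{
+  return (queue & 0x3) | ((tc & 0x3) << 2) | ((color & 0x3) << 4) |
+         ((subport & 0xFFFF) << 16) | ((pipe & 0xFFFFFFFF) << 32);
+}
+```
+
+The low 32 bits of this value are stored in the mbuf hash.sched.lo field and
+the high 32 bits in hash.sched.hi, as done by dpdk_hqos_metadata_set() in
+hqos.c.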
+
+
+### QoS Scheduler Parameters
+
+The following illustrates the default HQoS configuration for each 10GbE
+output port:
+
+Single subport (subport 0):
+ - Subport rate set to 100% of port rate
+ - Each of the 4 traffic classes has rate set to 100% of port rate
+
+4K pipes within subport 0 (pipes 0 .. 4095), each with identical configuration:
+ - Pipe rate set to 1/4K of port rate
+ - Each of the 4 traffic classes has rate set to 100% of pipe rate
+ - Within each traffic class, the byte-level WRR weights for the 4 queues are set to 1:1:1:1
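+
+For reference, the default per-pipe rate used throughout this document
+follows directly from splitting the port rate evenly across the pipes:
+
+```
+1250000000 bytes/second (port rate) / 4096 pipes ~= 305175 bytes/second per pipe
+```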
+
+
+#### Port configuration
+
+```
+port {
+ rate 1250000000 /* Assuming 10GbE port */
+ frame_overhead 24 /* Overhead fields per Ethernet frame:
+ * 7B (Preamble) +
+ * 1B (Start of Frame Delimiter (SFD)) +
+ * 4B (Frame Check Sequence (FCS)) +
+ * 12B (Inter Frame Gap (IFG))
+ */
+ mtu 1522 /* Assuming Ethernet/IPv4 pkt (FCS not included) */
+ n_subports_per_port 1 /* Number of subports per output interface */
+ n_pipes_per_subport 4096 /* Number of pipes (users/subscribers) */
+ queue_sizes 64 64 64 64 /* Packet queue size for each traffic class.
+ * All queues within the same pipe traffic class
+ * have the same size. Queues from different
+ * pipes serving the same traffic class have
+ * the same size. */
+}
+```
+
+
+#### Subport configuration
+
+```
+subport 0 {
+ tb_rate 1250000000 /* Subport level token bucket rate (bytes per second) */
+ tb_size 1000000 /* Subport level token bucket size (bytes) */
+ tc0_rate 1250000000 /* Subport level token bucket rate for traffic class 0 (bytes per second) */
+ tc1_rate 1250000000 /* Subport level token bucket rate for traffic class 1 (bytes per second) */
+ tc2_rate 1250000000 /* Subport level token bucket rate for traffic class 2 (bytes per second) */
+ tc3_rate 1250000000 /* Subport level token bucket rate for traffic class 3 (bytes per second) */
+ tc_period 10 /* Time interval for refilling the token bucket associated with traffic class (Milliseconds) */
+ pipe 0 4095 profile 0 /* pipes (users/subscribers) configured with pipe profile 0 */
+}
+```
+
+
+#### Pipe configuration
+
+```
+pipe_profile 0 {
+ tb_rate 305175 /* Pipe level token bucket rate (bytes per second) */
+ tb_size 1000000 /* Pipe level token bucket size (bytes) */
+ tc0_rate 305175 /* Pipe level token bucket rate for traffic class 0 (bytes per second) */
+ tc1_rate 305175 /* Pipe level token bucket rate for traffic class 1 (bytes per second) */
+ tc2_rate 305175 /* Pipe level token bucket rate for traffic class 2 (bytes per second) */
+ tc3_rate 305175 /* Pipe level token bucket rate for traffic class 3 (bytes per second) */
+ tc_period 40 /* Time interval for refilling the token bucket associated with traffic class at pipe level (Milliseconds) */
+ tc3_oversubscription_weight 1 /* Weight traffic class 3 oversubscription */
+ tc0_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 0 */
+ tc1_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 1 */
+ tc2_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 2 */
+ tc3_wrr_weights 1 1 1 1 /* Pipe queues WRR weights for traffic class 3 */
+}
+```
+
+
+#### Random Early Detection (RED) parameters per traffic class and color (Green / Yellow / Red)
+
+```
+red {
+ tc0_wred_min 48 40 32 /* Minimum threshold for traffic class 0 queue (min_th) in number of packets */
+ tc0_wred_max 64 64 64 /* Maximum threshold for traffic class 0 queue (max_th) in number of packets */
+ tc0_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 0 queue (maxp = 1 / maxp_inv) */
+ tc0_wred_weight 9 9 9 /* Traffic Class 0 queue weight */
+ tc1_wred_min 48 40 32 /* Minimum threshold for traffic class 1 queue (min_th) in number of packets */
+ tc1_wred_max 64 64 64 /* Maximum threshold for traffic class 1 queue (max_th) in number of packets */
+ tc1_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 1 queue (maxp = 1 / maxp_inv) */
+ tc1_wred_weight 9 9 9 /* Traffic Class 1 queue weight */
+ tc2_wred_min 48 40 32 /* Minimum threshold for traffic class 2 queue (min_th) in number of packets */
+ tc2_wred_max 64 64 64 /* Maximum threshold for traffic class 2 queue (max_th) in number of packets */
+ tc2_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 2 queue (maxp = 1 / maxp_inv) */
+ tc2_wred_weight 9 9 9 /* Traffic Class 2 queue weight */
+ tc3_wred_min 48 40 32 /* Minimum threshold for traffic class 3 queue (min_th) in number of packets */
+ tc3_wred_max 64 64 64 /* Maximum threshold for traffic class 3 queue (max_th) in number of packets */
+ tc3_wred_inv_prob 10 10 10 /* Inverse of packet marking probability for traffic class 3 queue (maxp = 1 / maxp_inv) */
+ tc3_wred_weight 9 9 9 /* Traffic Class 3 queue weight */
+}
+```
+
+
+### DPDK QoS Scheduler Integration in VPP
+
+The Hierarchical Quality-of-Service (HQoS) scheduler object can be seen as
+part of the logical NIC output interface. To enable HQoS on a specific output
+interface, the VPP startup.conf file has to be configured accordingly. The
+output interface that requires HQoS should have the "hqos" parameter
+specified in the dpdk section. Another, optional, parameter "hqos-thread" can
+be used to associate the output interface with a specific HQoS thread. In the
+cpu section of the configuration file, "corelist-hqos-threads" is used to
+assign logical CPU cores to run the HQoS threads. An HQoS thread can run
+multiple HQoS objects, each associated with a different output interface.
+Instead of writing packets to the NIC TX queue directly, all worker threads
+write packets to software queues. The HQoS threads read these software
+queues, enqueue the packets into the HQoS objects, dequeue packets from the
+HQoS objects, and write them to the NIC output interfaces. The worker threads
+need to be able to send packets to any output interface; therefore, each HQoS
+object associated with a NIC output interface should have as many software
+queues as there are worker threads.
+
+The following is a sample startup configuration with 4 worker threads feeding
+2 HQoS threads, each of which handles the QoS scheduler of one output
+interface.
+
+```
+dpdk {
+ socket-mem 16384,16384
+
+ dev 0000:02:00.0 {
+ num-rx-queues 2
+ hqos
+ }
+ dev 0000:06:00.0 {
+ num-rx-queues 2
+ hqos
+ }
+
+ num-mbufs 1000000
+}
+
+cpu {
+ main-core 0
+ corelist-workers 1, 2, 3, 4
+ corelist-hqos-threads 5, 6
+}
+```
+
+
+### QoS scheduler CLI Commands
+
+Each QoS scheduler instance is initialised with the default parameters
+required to configure the HQoS port, subport, pipes and queues. Some of these
+parameters can be re-configured at run time through CLI commands.
+
+
+#### Configuration
+
+The following commands can be used to configure QoS scheduler parameters.
+
+The command below can be used to set the subport level parameters, such as
+token bucket rate (bytes per second), token bucket size (bytes), traffic
+class rates (bytes per second) and token update period (milliseconds).
+
+```
+set dpdk interface hqos subport <interface> subport <subport_id> [rate <n>]
+ [bktsize <n>] [tc0 <n>] [tc1 <n>] [tc2 <n>] [tc3 <n>] [period <n>]
+```
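+
+For example, subport 0 of an interface could be set back to the default 10GbE
+values shown earlier in this document (the interface name is illustrative):
+
+```
+set dpdk interface hqos subport TenGigabitEthernet2/0/0 subport 0 rate 1250000000 bktsize 1000000 tc0 1250000000 tc1 1250000000 tc2 1250000000 tc3 1250000000 period 10
+```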
+
+The pipe profile of a given pipe can be set with the following command.
+
+```
+set dpdk interface hqos pipe <interface> subport <subport_id> pipe <pipe_id>
+ profile <profile_id>
+```
+
+To assign a QoS scheduler instance to a specific thread, the following
+command can be used.
+
+```
+set dpdk interface hqos placement <interface> thread <n>
+```
+
+The command below is used to set the packet fields required for classifying
+an incoming packet. As a result of the classification process, the packet
+field information is mapped to a 5-tuple (subport, pipe, traffic class,
+queue within traffic class, color) and stored in the packet mbuf.
+
+```
+set dpdk interface hqos pktfield <interface> id subport|pipe|tc offset <n>
+ mask <hex-mask>
+```
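+
+For example, the default pipe field placement (slab position 40, slab bitmask
+0x0000000fff000000, as shown in the show command output later in this
+document) could be set explicitly with (the interface name is illustrative):
+
+```
+set dpdk interface hqos pktfield TenGigabitEthernet2/0/0 id pipe offset 40 mask 0x0000000fff000000
+```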
+
+The DSCP table entries used for identifying the traffic class and queue can
+be set using the command below:
+
+```
+set dpdk interface hqos tctbl <interface> entry <map_val> tc <tc_id> queue <queue_id>
+```
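+
+For example, mapping packets with DSCP value 13 to traffic class 3, queue 1,
+which matches the default translation table shown in the next section (the
+interface name is illustrative):
+
+```
+set dpdk interface hqos tctbl TenGigabitEthernet2/0/0 entry 13 tc 3 queue 1
+```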
+
+
+#### Show Command
+
+The QoS scheduler configuration can be displayed using the command below.
+
+```
+ vpp# show dpdk interface hqos TenGigabitEthernet2/0/0
+ Thread:
+ Input SWQ size = 4096 packets
+ Enqueue burst size = 256 packets
+ Dequeue burst size = 220 packets
+ Packet field 0: slab position = 0, slab bitmask = 0x0000000000000000 (subport)
+ Packet field 1: slab position = 40, slab bitmask = 0x0000000fff000000 (pipe)
+ Packet field 2: slab position = 8, slab bitmask = 0x00000000000000fc (tc)
+ Packet field 2 tc translation table: ([Mapped Value Range]: tc/queue tc/queue ...)
+ [ 0 .. 15]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+ [16 .. 31]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+ [32 .. 47]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+ [48 .. 63]: 0/0 0/1 0/2 0/3 1/0 1/1 1/2 1/3 2/0 2/1 2/2 2/3 3/0 3/1 3/2 3/3
+ Port:
+ Rate = 1250000000 bytes/second
+ MTU = 1514 bytes
+ Frame overhead = 24 bytes
+ Number of subports = 1
+ Number of pipes per subport = 4096
+ Packet queue size: TC0 = 64, TC1 = 64, TC2 = 64, TC3 = 64 packets
+ Number of pipe profiles = 1
+ Subport 0:
+ Rate = 120000000 bytes/second
+ Token bucket size = 1000000 bytes
+ Traffic class rate: TC0 = 120000000, TC1 = 120000000, TC2 = 120000000, TC3 = 120000000 bytes/second
+ TC period = 10 milliseconds
+ Pipe profile 0:
+ Rate = 305175 bytes/second
+ Token bucket size = 1000000 bytes
+ Traffic class rate: TC0 = 305175, TC1 = 305175, TC2 = 305175, TC3 = 305175 bytes/second
+ TC period = 40 milliseconds
+ TC0 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+ TC1 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+ TC2 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+ TC3 WRR weights: Q0 = 1, Q1 = 1, Q2 = 1, Q3 = 1
+```
+
+The QoS scheduler placement over the logical CPU cores can be displayed using
+the command below.
+
+```
+ vpp# show dpdk interface hqos placement
+ Thread 5 (vpp_hqos-threads_0 at lcore 5):
+ TenGigabitEthernet2/0/0 queue 0
+ Thread 6 (vpp_hqos-threads_1 at lcore 6):
+ TenGigabitEthernet4/0/1 queue 0
+```
+
+
+### QoS Scheduler Binary APIs
+
+This section describes the binary APIs available for configuring QoS
+scheduler parameters at run time.
+
+The following API can be used to set the pipe profile of a pipe that belongs
+to a given subport:
+
+```
+sw_interface_set_dpdk_hqos_pipe rx <intfc> | sw_if_index <id>
+ subport <subport-id> pipe <pipe-id> profile <profile-id>
+```
+
+The data structures used to set the pipe profile parameters are as follows:
+
+```
+ /** \\brief DPDK interface HQoS pipe profile set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param pipe - pipe ID within its subport
+ @param profile - pipe profile ID
+ */
+ define sw_interface_set_dpdk_hqos_pipe {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 subport;
+ u32 pipe;
+ u32 profile;
+ };
+
+ /** \\brief DPDK interface HQoS pipe profile set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+ */
+ define sw_interface_set_dpdk_hqos_pipe_reply {
+ u32 context;
+ i32 retval;
+ };
+```
+
+The following API can be used to set the subport level parameters, for
+example token bucket rate (bytes per second), token bucket size (bytes),
+traffic class rates (bytes per second) and the token update period.
+
+```
+sw_interface_set_dpdk_hqos_subport rx <intfc> | sw_if_index <id>
+ subport <subport-id> [rate <n>] [bktsize <n>]
+ [tc0 <n>] [tc1 <n>] [tc2 <n>] [tc3 <n>] [period <n>]
+```
+
+The data structures used to set the subport level parameters are as follows:
+
+```
+ /** \\brief DPDK interface HQoS subport parameters set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param subport - subport ID
+ @param tb_rate - subport token bucket rate (measured in bytes/second)
+ @param tb_size - subport token bucket size (measured in credits)
+ @param tc_rate - subport traffic class 0 .. 3 rates (measured in bytes/second)
+ @param tc_period - enforcement period for rates (measured in milliseconds)
+ */
+ define sw_interface_set_dpdk_hqos_subport {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 subport;
+ u32 tb_rate;
+ u32 tb_size;
+ u32 tc_rate[4];
+ u32 tc_period;
+ };
+
+ /** \\brief DPDK interface HQoS subport parameters set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+ */
+ define sw_interface_set_dpdk_hqos_subport_reply {
+ u32 context;
+ i32 retval;
+ };
+```
+
+The following API can be used to set a DSCP table entry. The DSCP table has
+64 entries which map the packet DSCP field onto a traffic class and HQoS
+input queue.
+
+```
+sw_interface_set_dpdk_hqos_tctbl rx <intfc> | sw_if_index <id>
+ entry <n> tc <n> queue <n>
+```
+
+The data structures used for setting DSCP table entries are given below.
+
+```
+ /** \\brief DPDK interface HQoS tctbl entry set request
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - the interface
+ @param entry - entry index ID
+ @param tc - traffic class (0 .. 3)
+ @param queue - traffic class queue (0 .. 3)
+ */
+ define sw_interface_set_dpdk_hqos_tctbl {
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u32 entry;
+ u32 tc;
+ u32 queue;
+ };
+
+ /** \\brief DPDK interface HQoS tctbl entry set reply
+ @param context - sender context, to match reply w/ request
+ @param retval - request return code
+ */
+ define sw_interface_set_dpdk_hqos_tctbl_reply {
+ u32 context;
+ i32 retval;
+ };
+```