path: root/app/test-pmd
Diffstat (limited to 'app/test-pmd')
-rw-r--r--  app/test-pmd/Makefile        |    5
-rw-r--r--  app/test-pmd/bpf_cmd.c       |  175
-rw-r--r--  app/test-pmd/bpf_cmd.h       |   16
-rw-r--r--  app/test-pmd/cmdline.c       | 1401
-rw-r--r--  app/test-pmd/cmdline_flow.c  | 1099
-rw-r--r--  app/test-pmd/cmdline_tm.c    |  148
-rw-r--r--  app/test-pmd/cmdline_tm.h    |    2
-rw-r--r--  app/test-pmd/config.c        |  411
-rw-r--r--  app/test-pmd/csumonly.c      |  106
-rw-r--r--  app/test-pmd/macfwd.c        |    3
-rw-r--r--  app/test-pmd/macswap.c       |    3
-rw-r--r--  app/test-pmd/meson.build     |   27
-rw-r--r--  app/test-pmd/parameters.c    |   82
-rw-r--r--  app/test-pmd/testpmd.c       |  373
-rw-r--r--  app/test-pmd/testpmd.h       |   65
15 files changed, 3480 insertions(+), 436 deletions(-)
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index ed588ab6..a5a827bb 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -33,6 +33,7 @@ SRCS-y += txonly.c
SRCS-y += csumonly.c
SRCS-y += icmpecho.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_cmd.c
ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC)$(CONFIG_RTE_LIBRTE_SCHED),yy)
SRCS-y += tm.c
@@ -44,8 +45,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
LDLIBS += -lrte_pmd_bond
endif
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS)$(CONFIG_RTE_LIBRTE_DPAA_PMD),yy)
LDLIBS += -lrte_pmd_dpaa
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
endif
ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
diff --git a/app/test-pmd/bpf_cmd.c b/app/test-pmd/bpf_cmd.c
new file mode 100644
index 00000000..584fad90
--- /dev/null
+++ b/app/test-pmd/bpf_cmd.c
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_bpf_ethdev.h>
+
+#include <cmdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include "testpmd.h"
+
+static const struct rte_bpf_xsym bpf_xsym[] = {
+ {
+ .name = RTE_STR(stdout),
+ .type = RTE_BPF_XTYPE_VAR,
+ .var = &stdout,
+ },
+ {
+ .name = RTE_STR(rte_pktmbuf_dump),
+ .type = RTE_BPF_XTYPE_FUNC,
+ .func = (void *)rte_pktmbuf_dump,
+ },
+};
+
+/* *** load BPF program *** */
+struct cmd_bpf_ld_result {
+ cmdline_fixed_string_t bpf;
+ cmdline_fixed_string_t dir;
+ uint8_t port;
+ uint16_t queue;
+ cmdline_fixed_string_t op;
+ cmdline_fixed_string_t flags;
+ cmdline_fixed_string_t prm;
+};
+
+static void
+bpf_parse_flags(const char *str, struct rte_bpf_arg *arg, uint32_t *flags)
+{
+ uint32_t i, v;
+
+ *flags = RTE_BPF_ETH_F_NONE;
+ arg->type = RTE_BPF_ARG_PTR;
+ arg->size = mbuf_data_size;
+
+ for (i = 0; str[i] != 0; i++) {
+ v = toupper(str[i]);
+ if (v == 'J')
+ *flags |= RTE_BPF_ETH_F_JIT;
+ else if (v == 'M') {
+ arg->type = RTE_BPF_ARG_PTR_MBUF;
+ arg->size = sizeof(struct rte_mbuf);
+ arg->buf_size = mbuf_data_size;
+ } else if (v == '-')
+ continue;
+ else
+ printf("unknown flag: \'%c\'", v);
+ }
+}
+
+static void cmd_operate_bpf_ld_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int32_t rc;
+ uint32_t flags;
+ struct cmd_bpf_ld_result *res;
+ struct rte_bpf_prm prm;
+ const char *fname, *sname;
+
+ res = parsed_result;
+ memset(&prm, 0, sizeof(prm));
+ prm.xsym = bpf_xsym;
+ prm.nb_xsym = RTE_DIM(bpf_xsym);
+
+ bpf_parse_flags(res->flags, &prm.prog_arg, &flags);
+ fname = res->prm;
+ sname = ".text";
+
+ if (strcmp(res->dir, "rx") == 0) {
+ rc = rte_bpf_eth_rx_elf_load(res->port, res->queue, &prm,
+ fname, sname, flags);
+ printf("%d:%s\n", rc, strerror(-rc));
+ } else if (strcmp(res->dir, "tx") == 0) {
+ rc = rte_bpf_eth_tx_elf_load(res->port, res->queue, &prm,
+ fname, sname, flags);
+ printf("%d:%s\n", rc, strerror(-rc));
+ } else
+ printf("invalid value: %s\n", res->dir);
+}
+
+cmdline_parse_token_string_t cmd_load_bpf_start =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ bpf, "bpf-load");
+cmdline_parse_token_string_t cmd_load_bpf_dir =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ dir, "rx#tx");
+cmdline_parse_token_num_t cmd_load_bpf_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, port, UINT8);
+cmdline_parse_token_num_t cmd_load_bpf_queue =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, queue, UINT16);
+cmdline_parse_token_string_t cmd_load_bpf_flags =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ flags, NULL);
+cmdline_parse_token_string_t cmd_load_bpf_prm =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ prm, NULL);
+
+cmdline_parse_inst_t cmd_operate_bpf_ld_parse = {
+ .f = cmd_operate_bpf_ld_parsed,
+ .data = NULL,
+ .help_str = "bpf-load rx|tx <port> <queue> <J|M|B> <file_name>",
+ .tokens = {
+ (void *)&cmd_load_bpf_start,
+ (void *)&cmd_load_bpf_dir,
+ (void *)&cmd_load_bpf_port,
+ (void *)&cmd_load_bpf_queue,
+ (void *)&cmd_load_bpf_flags,
+ (void *)&cmd_load_bpf_prm,
+ NULL,
+ },
+};
+
+/* *** unload BPF program *** */
+struct cmd_bpf_unld_result {
+ cmdline_fixed_string_t bpf;
+ cmdline_fixed_string_t dir;
+ uint8_t port;
+ uint16_t queue;
+};
+
+static void cmd_operate_bpf_unld_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_bpf_unld_result *res;
+
+ res = parsed_result;
+
+ if (strcmp(res->dir, "rx") == 0)
+ rte_bpf_eth_rx_unload(res->port, res->queue);
+ else if (strcmp(res->dir, "tx") == 0)
+ rte_bpf_eth_tx_unload(res->port, res->queue);
+ else
+ printf("invalid value: %s\n", res->dir);
+}
+
+cmdline_parse_token_string_t cmd_unload_bpf_start =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result,
+ bpf, "bpf-unload");
+cmdline_parse_token_string_t cmd_unload_bpf_dir =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result,
+ dir, "rx#tx");
+cmdline_parse_token_num_t cmd_unload_bpf_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, port, UINT8);
+cmdline_parse_token_num_t cmd_unload_bpf_queue =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, queue, UINT16);
+
+cmdline_parse_inst_t cmd_operate_bpf_unld_parse = {
+ .f = cmd_operate_bpf_unld_parsed,
+ .data = NULL,
+ .help_str = "bpf-unload rx|tx <port> <queue>",
+ .tokens = {
+ (void *)&cmd_unload_bpf_start,
+ (void *)&cmd_unload_bpf_dir,
+ (void *)&cmd_unload_bpf_port,
+ (void *)&cmd_unload_bpf_queue,
+ NULL,
+ },
+};
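For reference, a minimal sketch (not part of this patch) of how an application can drive the same rte_bpf_ethdev API that the new bpf-load command wraps; the port/queue numbers, ELF path and data size below are placeholder assumptions, while the ".text" section name and the RTE_BPF_ETH_F_JIT flag mirror the handler above.

/* Illustrative only: load a JIT-compiled BPF program from an ELF file
 * onto Rx queue 0 of port 0, passing packet data as a plain pointer
 * (the RTE_BPF_ARG_PTR case that bpf_parse_flags() selects by default).
 */
#include <string.h>
#include <rte_bpf_ethdev.h>

static int
load_rx_bpf_sketch(const char *elf_path, uint32_t data_size)
{
	struct rte_bpf_prm prm;

	memset(&prm, 0, sizeof(prm));
	prm.prog_arg.type = RTE_BPF_ARG_PTR;
	prm.prog_arg.size = data_size;	/* testpmd uses mbuf_data_size here */

	/* same call the "bpf-load rx 0 0 J <file>" path ends up making */
	return rte_bpf_eth_rx_elf_load(0, 0, &prm, elf_path, ".text",
			RTE_BPF_ETH_F_JIT);
}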
diff --git a/app/test-pmd/bpf_cmd.h b/app/test-pmd/bpf_cmd.h
new file mode 100644
index 00000000..5ee4c9f7
--- /dev/null
+++ b/app/test-pmd/bpf_cmd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _BPF_CMD_H_
+#define _BPF_CMD_H_
+
+#ifdef RTE_LIBRTE_BPF
+
+ /* BPF CLI */
+extern cmdline_parse_inst_t cmd_operate_bpf_ld_parse;
+extern cmdline_parse_inst_t cmd_operate_bpf_unld_parse;
+
+#endif /* RTE_LIBRTE_BPF */
+
+#endif /* _BPF_CMD_H_ */
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index d1dc1de6..27e2aa8c 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -60,7 +60,7 @@
#include <rte_eth_bond.h>
#include <rte_eth_bond_8023ad.h>
#endif
-#ifdef RTE_LIBRTE_DPAA_PMD
+#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD
#include <rte_pmd_dpaa.h>
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
@@ -75,6 +75,7 @@
#include "testpmd.h"
#include "cmdline_mtr.h"
#include "cmdline_tm.h"
+#include "bpf_cmd.h"
static struct cmdline *testpmd_cl;
@@ -771,6 +772,12 @@ static void cmd_help_long_parsed(void *parsed_result,
" (priority) (weight)\n"
" Set port tm node parent.\n\n"
+ "suspend port tm node (port_id) (node_id)"
+ " Suspend tm node.\n\n"
+
+ "resume port tm node (port_id) (node_id)"
+ " Resume tm node.\n\n"
+
"port tm hierarchy commit (port_id) (clean_on_fail)\n"
" Commit tm hierarchy.\n\n"
@@ -806,6 +813,9 @@ static void cmd_help_long_parsed(void *parsed_result,
" duplex (half|full|auto)\n"
" Set speed and duplex for all ports or port_id\n\n"
+ "port config (port_id|all) loopback (mode)\n"
+ " Set loopback mode for all ports or port_id\n\n"
+
"port config all (rxq|txq|rxd|txd) (value)\n"
" Set number for rxq/txq/rxd/txd.\n\n"
@@ -818,8 +828,8 @@ static void cmd_help_long_parsed(void *parsed_result,
" Set crc-strip/scatter/rx-checksum/hardware-vlan/drop_en"
" for ports.\n\n"
- "port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|"
- "geneve|nvgre|none|<flowtype_id>)\n"
+ "port config all rss (all|default|ip|tcp|udp|sctp|"
+ "ether|port|vxlan|geneve|nvgre|none|<flowtype_id>)\n"
" Set the RSS mode.\n\n"
"port config port-id rss reta (hash,queue)[,(hash,queue)]\n"
@@ -843,10 +853,18 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config mtu X value\n"
" Set the MTU of port X to a given value\n\n"
+ "port config (port_id) (rxq|txq) (queue_id) ring_size (value)\n"
+ " Set a rx/tx queue's ring size configuration, the new"
+ " value will take effect after command that (re-)start the port"
+ " or command that setup the specific queue\n\n"
+
"port (port_id) (rxq|txq) (queue_id) (start|stop)\n"
" Start/stop a rx/tx queue of port X. Only take effect"
" when port X is started\n\n"
+ "port (port_id) (rxq|txq) (queue_id) setup\n"
+ " Setup a rx/tx queue of port X.\n\n"
+
"port config (port_id|all) l2-tunnel E-tag ether-type"
" (value)\n"
" Set the value of E-tag ether-type.\n\n"
@@ -870,6 +888,9 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config (port_id) pctype (pctype_id) hash_inset|"
"fdir_inset|fdir_flx_inset clear all"
" Clear RSS|FDIR|FDIR_FLX input set completely for some pctype\n\n"
+
+ "port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)\n\n"
+ " Add/remove UDP tunnel port for tunneling offload\n\n"
);
}
@@ -1415,7 +1436,7 @@ cmdline_parse_inst_t cmd_config_speed_all = {
struct cmd_config_speed_specific {
cmdline_fixed_string_t port;
cmdline_fixed_string_t keyword;
- uint8_t id;
+ portid_t id;
cmdline_fixed_string_t item1;
cmdline_fixed_string_t item2;
cmdline_fixed_string_t value1;
@@ -1455,7 +1476,7 @@ cmdline_parse_token_string_t cmd_config_speed_specific_keyword =
TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, keyword,
"config");
cmdline_parse_token_num_t cmd_config_speed_specific_id =
- TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT16);
cmdline_parse_token_string_t cmd_config_speed_specific_item1 =
TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, item1,
"speed");
@@ -1487,6 +1508,122 @@ cmdline_parse_inst_t cmd_config_speed_specific = {
},
};
+/* *** configure loopback for all ports *** */
+struct cmd_config_loopback_all {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t keyword;
+ cmdline_fixed_string_t all;
+ cmdline_fixed_string_t item;
+ uint32_t mode;
+};
+
+static void
+cmd_config_loopback_all_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_loopback_all *res = parsed_result;
+ portid_t pid;
+
+ if (!all_ports_stopped()) {
+ printf("Please stop all ports first\n");
+ return;
+ }
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ ports[pid].dev_conf.lpbk_mode = res->mode;
+ }
+
+ cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_loopback_all_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, port, "port");
+cmdline_parse_token_string_t cmd_config_loopback_all_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, keyword,
+ "config");
+cmdline_parse_token_string_t cmd_config_loopback_all_all =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, all, "all");
+cmdline_parse_token_string_t cmd_config_loopback_all_item =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, item,
+ "loopback");
+cmdline_parse_token_num_t cmd_config_loopback_all_mode =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_all, mode, UINT32);
+
+cmdline_parse_inst_t cmd_config_loopback_all = {
+ .f = cmd_config_loopback_all_parsed,
+ .data = NULL,
+ .help_str = "port config all loopback <mode>",
+ .tokens = {
+ (void *)&cmd_config_loopback_all_port,
+ (void *)&cmd_config_loopback_all_keyword,
+ (void *)&cmd_config_loopback_all_all,
+ (void *)&cmd_config_loopback_all_item,
+ (void *)&cmd_config_loopback_all_mode,
+ NULL,
+ },
+};
+
+/* *** configure loopback for specific port *** */
+struct cmd_config_loopback_specific {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t keyword;
+ uint16_t port_id;
+ cmdline_fixed_string_t item;
+ uint32_t mode;
+};
+
+static void
+cmd_config_loopback_specific_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_loopback_specific *res = parsed_result;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ if (!port_is_stopped(res->port_id)) {
+ printf("Please stop port %u first\n", res->port_id);
+ return;
+ }
+
+ ports[res->port_id].dev_conf.lpbk_mode = res->mode;
+
+ cmd_reconfig_device_queue(res->port_id, 1, 1);
+}
+
+
+cmdline_parse_token_string_t cmd_config_loopback_specific_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, port,
+ "port");
+cmdline_parse_token_string_t cmd_config_loopback_specific_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, keyword,
+ "config");
+cmdline_parse_token_num_t cmd_config_loopback_specific_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, port_id,
+ UINT16);
+cmdline_parse_token_string_t cmd_config_loopback_specific_item =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, item,
+ "loopback");
+cmdline_parse_token_num_t cmd_config_loopback_specific_mode =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, mode,
+ UINT32);
+
+cmdline_parse_inst_t cmd_config_loopback_specific = {
+ .f = cmd_config_loopback_specific_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> loopback <mode>",
+ .tokens = {
+ (void *)&cmd_config_loopback_specific_port,
+ (void *)&cmd_config_loopback_specific_keyword,
+ (void *)&cmd_config_loopback_specific_id,
+ (void *)&cmd_config_loopback_specific_item,
+ (void *)&cmd_config_loopback_specific_mode,
+ NULL,
+ },
+};
+
/* *** configure txq/rxq, txd/rxd *** */
struct cmd_config_rx_tx {
cmdline_fixed_string_t port;
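The loopback commands above only record the mode in the cached per-port dev_conf; it reaches the driver when the port is reconfigured. A rough sketch (not part of this patch) of the equivalent direct ethdev usage, with the port id, queue counts and mode value as placeholder assumptions and the meaning of non-zero modes being PMD-specific:

/* Illustrative only: set rte_eth_conf.lpbk_mode, the same field the
 * loopback commands above update, and apply it via reconfiguration.
 * A real application would also fill rxmode/txmode as needed.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
set_loopback_sketch(uint16_t port_id, uint32_t mode,
		uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.lpbk_mode = mode;	/* 0 conventionally means loopback off */

	/* takes effect on the next configure/start cycle, as in testpmd */
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}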
@@ -1879,8 +2016,11 @@ cmd_config_rss_parsed(void *parsed_result,
{
struct cmd_config_rss *res = parsed_result;
struct rte_eth_rss_conf rss_conf = { .rss_key_len = 0, };
+ struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
+ int use_default = 0;
+ int all_updated = 1;
int diag;
- uint8_t i;
+ uint16_t i;
if (!strcmp(res->value, "all"))
rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP |
@@ -1906,6 +2046,8 @@ cmd_config_rss_parsed(void *parsed_result,
rss_conf.rss_hf = ETH_RSS_NVGRE;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
+ else if (!strcmp(res->value, "default"))
+ use_default = 1;
else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
atoi(res->value) < 64)
rss_conf.rss_hf = 1ULL << atoi(res->value);
@@ -1914,13 +2056,22 @@ cmd_config_rss_parsed(void *parsed_result,
return;
}
rss_conf.rss_key = NULL;
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ /* Update global configuration for RSS types. */
+ RTE_ETH_FOREACH_DEV(i) {
+ if (use_default) {
+ rte_eth_dev_info_get(i, &dev_info);
+ rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+ }
diag = rte_eth_dev_rss_hash_update(i, &rss_conf);
- if (diag < 0)
+ if (diag < 0) {
+ all_updated = 0;
printf("Configuration of RSS hash at ethernet port %d "
"failed with error (%d): %s.\n",
i, -diag, strerror(-diag));
+ }
}
+ if (all_updated && !use_default)
+ rss_hf = rss_conf.rss_hf;
}
cmdline_parse_token_string_t cmd_config_rss_port =
@@ -1938,7 +2089,7 @@ cmdline_parse_inst_t cmd_config_rss = {
.f = cmd_config_rss_parsed,
.data = NULL,
.help_str = "port config all rss "
- "all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none|<flowtype_id>",
+ "all|default|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none|<flowtype_id>",
.tokens = {
(void *)&cmd_config_rss_port,
(void *)&cmd_config_rss_keyword,
@@ -2067,6 +2218,102 @@ cmdline_parse_inst_t cmd_config_rss_hash_key = {
},
};
+/* *** configure port rxq/txq ring size *** */
+struct cmd_config_rxtx_ring_size {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t portid;
+ cmdline_fixed_string_t rxtxq;
+ uint16_t qid;
+ cmdline_fixed_string_t rsize;
+ uint16_t size;
+};
+
+static void
+cmd_config_rxtx_ring_size_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_rxtx_ring_size *res = parsed_result;
+ struct rte_port *port;
+ uint8_t isrx;
+
+ if (port_id_is_invalid(res->portid, ENABLED_WARN))
+ return;
+
+ if (res->portid == (portid_t)RTE_PORT_ALL) {
+ printf("Invalid port id\n");
+ return;
+ }
+
+ port = &ports[res->portid];
+
+ if (!strcmp(res->rxtxq, "rxq"))
+ isrx = 1;
+ else if (!strcmp(res->rxtxq, "txq"))
+ isrx = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
+
+ if (isrx && rx_queue_id_is_invalid(res->qid))
+ return;
+ else if (!isrx && tx_queue_id_is_invalid(res->qid))
+ return;
+
+ if (isrx && res->size != 0 && res->size <= rx_free_thresh) {
+ printf("Invalid rx ring_size, must > rx_free_thresh: %d\n",
+ rx_free_thresh);
+ return;
+ }
+
+ if (isrx)
+ port->nb_rx_desc[res->qid] = res->size;
+ else
+ port->nb_tx_desc[res->qid] = res->size;
+
+ cmd_reconfig_device_queue(res->portid, 0, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ port, "port");
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_config =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ config, "config");
+cmdline_parse_token_num_t cmd_config_rxtx_ring_size_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ portid, UINT16);
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rxtxq =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ rxtxq, "rxq#txq");
+cmdline_parse_token_num_t cmd_config_rxtx_ring_size_qid =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ qid, UINT16);
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rsize =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ rsize, "ring_size");
+cmdline_parse_token_num_t cmd_config_rxtx_ring_size_size =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ size, UINT16);
+
+cmdline_parse_inst_t cmd_config_rxtx_ring_size = {
+ .f = cmd_config_rxtx_ring_size_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> rxq|txq <queue_id> ring_size <value>",
+ .tokens = {
+ (void *)&cmd_config_rxtx_ring_size_port,
+ (void *)&cmd_config_rxtx_ring_size_config,
+ (void *)&cmd_config_rxtx_ring_size_portid,
+ (void *)&cmd_config_rxtx_ring_size_rxtxq,
+ (void *)&cmd_config_rxtx_ring_size_qid,
+ (void *)&cmd_config_rxtx_ring_size_rsize,
+ (void *)&cmd_config_rxtx_ring_size_size,
+ NULL,
+ },
+};
+
/* *** configure port rxq/txq start/stop *** */
struct cmd_config_rxtx_queue {
cmdline_fixed_string_t port;
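The ring_size command above only stores the new descriptor count in the per-queue nb_rx_desc/nb_tx_desc arrays; it is consumed the next time the queue is set up. A short sketch (not part of this patch) of that consumption through the standard ethdev queue-setup call, with the mempool and ring size as placeholder assumptions:

/* Illustrative only: re-create an Rx queue with a new ring size.
 * Passing NULL for rx_conf requests the device's default queue
 * configuration; testpmd instead passes its cached rx_conf[qid].
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
resize_rxq_sketch(uint16_t port_id, uint16_t queue_id,
		uint16_t new_ring_size, struct rte_mempool *pool)
{
	return rte_eth_rx_queue_setup(port_id, queue_id, new_ring_size,
			rte_eth_dev_socket_id(port_id), NULL, pool);
}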
@@ -2152,7 +2399,7 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
.data = NULL,
.help_str = "port <port_id> rxq|txq <queue_id> start|stop",
.tokens = {
- (void *)&cmd_config_speed_all_port,
+ (void *)&cmd_config_rxtx_queue_port,
(void *)&cmd_config_rxtx_queue_portid,
(void *)&cmd_config_rxtx_queue_rxtxq,
(void *)&cmd_config_rxtx_queue_qid,
@@ -2161,6 +2408,117 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
},
};
+/* *** configure port rxq/txq setup *** */
+struct cmd_setup_rxtx_queue {
+ cmdline_fixed_string_t port;
+ portid_t portid;
+ cmdline_fixed_string_t rxtxq;
+ uint16_t qid;
+ cmdline_fixed_string_t setup;
+};
+
+/* Common CLI fields for queue setup */
+cmdline_parse_token_string_t cmd_setup_rxtx_queue_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, port, "port");
+cmdline_parse_token_num_t cmd_setup_rxtx_queue_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, portid, UINT16);
+cmdline_parse_token_string_t cmd_setup_rxtx_queue_rxtxq =
+ TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, rxtxq, "rxq#txq");
+cmdline_parse_token_num_t cmd_setup_rxtx_queue_qid =
+ TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, qid, UINT16);
+cmdline_parse_token_string_t cmd_setup_rxtx_queue_setup =
+ TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, setup, "setup");
+
+static void
+cmd_setup_rxtx_queue_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_setup_rxtx_queue *res = parsed_result;
+ struct rte_port *port;
+ struct rte_mempool *mp;
+ unsigned int socket_id;
+ uint8_t isrx = 0;
+ int ret;
+
+ if (port_id_is_invalid(res->portid, ENABLED_WARN))
+ return;
+
+ if (res->portid == (portid_t)RTE_PORT_ALL) {
+ printf("Invalid port id\n");
+ return;
+ }
+
+ if (!strcmp(res->rxtxq, "rxq"))
+ isrx = 1;
+ else if (!strcmp(res->rxtxq, "txq"))
+ isrx = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
+
+ if (isrx && rx_queue_id_is_invalid(res->qid)) {
+ printf("Invalid rx queue\n");
+ return;
+ } else if (!isrx && tx_queue_id_is_invalid(res->qid)) {
+ printf("Invalid tx queue\n");
+ return;
+ }
+
+ port = &ports[res->portid];
+ if (isrx) {
+ socket_id = rxring_numa[res->portid];
+ if (!numa_support || socket_id == NUMA_NO_CONFIG)
+ socket_id = port->socket_id;
+
+ mp = mbuf_pool_find(socket_id);
+ if (mp == NULL) {
+ printf("Failed to setup RX queue: "
+ "No mempool allocation"
+ " on the socket %d\n",
+ rxring_numa[res->portid]);
+ return;
+ }
+ ret = rte_eth_rx_queue_setup(res->portid,
+ res->qid,
+ port->nb_rx_desc[res->qid],
+ socket_id,
+ &port->rx_conf[res->qid],
+ mp);
+ if (ret)
+ printf("Failed to setup RX queue\n");
+ } else {
+ socket_id = txring_numa[res->portid];
+ if (!numa_support || socket_id == NUMA_NO_CONFIG)
+ socket_id = port->socket_id;
+
+ ret = rte_eth_tx_queue_setup(res->portid,
+ res->qid,
+ port->nb_tx_desc[res->qid],
+ socket_id,
+ &port->tx_conf[res->qid]);
+ if (ret)
+ printf("Failed to setup TX queue\n");
+ }
+}
+
+cmdline_parse_inst_t cmd_setup_rxtx_queue = {
+ .f = cmd_setup_rxtx_queue_parsed,
+ .data = NULL,
+ .help_str = "port <port_id> rxq|txq <queue_idx> setup",
+ .tokens = {
+ (void *)&cmd_setup_rxtx_queue_port,
+ (void *)&cmd_setup_rxtx_queue_portid,
+ (void *)&cmd_setup_rxtx_queue_rxtxq,
+ (void *)&cmd_setup_rxtx_queue_qid,
+ (void *)&cmd_setup_rxtx_queue_setup,
+ NULL,
+ },
+};
+
+
/* *** Configure RSS RETA *** */
struct cmd_config_rss_reta {
cmdline_fixed_string_t port;
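Taken together with the ring_size command above, the new setup command lets a single queue be resized and re-created without reconfiguring the whole port. One possible sequence at the testpmd prompt (port 0, Rx queue 0 and a ring size of 512 are arbitrary example values):

testpmd> port stop 0
testpmd> port config 0 rxq 0 ring_size 512
testpmd> port 0 rxq 0 setup
testpmd> port start 0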
@@ -2599,6 +2957,8 @@ cmd_config_burst_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_config_burst *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ uint16_t rec_nb_pkts;
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
@@ -2606,11 +2966,34 @@ cmd_config_burst_parsed(void *parsed_result,
}
if (!strcmp(res->name, "burst")) {
- if (res->value < 1 || res->value > MAX_PKT_BURST) {
+ if (res->value == 0) {
+ /* If user gives a value of zero, query the PMD for
+ * its recommended Rx burst size. Testpmd uses a single
+ * size for all ports, so assume all ports are the same
+ * NIC model and use the values from Port 0.
+ */
+ rte_eth_dev_info_get(0, &dev_info);
+ rec_nb_pkts = dev_info.default_rxportconf.burst_size;
+
+ if (rec_nb_pkts == 0) {
+ printf("PMD does not recommend a burst size.\n"
+ "User provided value must be between"
+ " 1 and %d\n", MAX_PKT_BURST);
+ return;
+ } else if (rec_nb_pkts > MAX_PKT_BURST) {
+ printf("PMD recommended burst size of %d"
+ " exceeds maximum value of %d\n",
+ rec_nb_pkts, MAX_PKT_BURST);
+ return;
+ }
+ printf("Using PMD-provided burst value of %d\n",
+ rec_nb_pkts);
+ nb_pkt_per_burst = rec_nb_pkts;
+ } else if (res->value > MAX_PKT_BURST) {
printf("burst must be >= 1 && <= %d\n", MAX_PKT_BURST);
return;
- }
- nb_pkt_per_burst = res->value;
+ } else
+ nb_pkt_per_burst = res->value;
} else {
printf("Unknown parameter\n");
return;
@@ -4013,6 +4396,12 @@ check_tunnel_tso_nic_support(portid_t port_id)
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
printf("Warning: GENEVE TUNNEL TSO not supported therefore "
"not enabled for port %d\n", port_id);
+ if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+ printf("Warning: IP TUNNEL TSO not supported therefore "
+ "not enabled for port %d\n", port_id);
+ if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+ printf("Warning: UDP TUNNEL TSO not supported therefore "
+ "not enabled for port %d\n", port_id);
return dev_info;
}
@@ -4040,13 +4429,17 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
printf("TSO for tunneled packets is disabled\n");
} else {
uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
ports[res->port_id].dev_conf.txmode.offloads |=
(tso_offloads & dev_info.tx_offload_capa);
@@ -5402,7 +5795,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result,
port_id);
/* Update number of ports */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
reconfig(port_id, res->socket);
rte_eth_promiscuous_enable(port_id);
}
@@ -5511,11 +5904,6 @@ static void cmd_set_bond_mon_period_parsed(void *parsed_result,
struct cmd_set_bond_mon_period_result *res = parsed_result;
int ret;
- if (res->port_num >= nb_ports) {
- printf("Port id %d must be less than %d\n", res->port_num, nb_ports);
- return;
- }
-
ret = rte_eth_bond_link_monitoring_set(res->port_num, res->period_ms);
/* check the return value and print it if is < 0 */
@@ -5572,12 +5960,6 @@ cmd_set_bonding_agg_mode(void *parsed_result,
struct cmd_set_bonding_agg_mode_policy_result *res = parsed_result;
uint8_t policy = AGG_BANDWIDTH;
- if (res->port_num >= nb_ports) {
- printf("Port id %d must be less than %d\n",
- res->port_num, nb_ports);
- return;
- }
-
if (!strcmp(res->policy, "bandwidth"))
policy = AGG_BANDWIDTH;
else if (!strcmp(res->policy, "stable"))
@@ -8278,6 +8660,89 @@ cmdline_parse_inst_t cmd_tunnel_udp_config = {
},
};
+struct cmd_config_tunnel_udp_port {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t udp_tunnel_port;
+ cmdline_fixed_string_t action;
+ cmdline_fixed_string_t tunnel_type;
+ uint16_t udp_port;
+};
+
+static void
+cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_tunnel_udp_port *res = parsed_result;
+ struct rte_eth_udp_tunnel tunnel_udp;
+ int ret = 0;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ tunnel_udp.udp_port = res->udp_port;
+
+ if (!strcmp(res->tunnel_type, "vxlan")) {
+ tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+ } else if (!strcmp(res->tunnel_type, "geneve")) {
+ tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+ } else {
+ printf("Invalid tunnel type\n");
+ return;
+ }
+
+ if (!strcmp(res->action, "add"))
+ ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
+ &tunnel_udp);
+ else
+ ret = rte_eth_dev_udp_tunnel_port_delete(res->port_id,
+ &tunnel_udp);
+
+ if (ret < 0)
+ printf("udp tunneling port add error: (%s)\n", strerror(-ret));
+}
+
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, port,
+ "port");
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_config =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, config,
+ "config");
+cmdline_parse_token_num_t cmd_config_tunnel_udp_port_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, port_id,
+ UINT16);
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port,
+ udp_tunnel_port,
+ "udp_tunnel_port");
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, action,
+ "add#rm");
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_type =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, tunnel_type,
+ "vxlan#geneve");
+cmdline_parse_token_num_t cmd_config_tunnel_udp_port_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, udp_port,
+ UINT16);
+
+cmdline_parse_inst_t cmd_cfg_tunnel_udp_port = {
+ .f = cmd_cfg_tunnel_udp_port_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> udp_tunnel_port add|rm vxlan|geneve <udp_port>",
+ .tokens = {
+ (void *)&cmd_config_tunnel_udp_port_port,
+ (void *)&cmd_config_tunnel_udp_port_config,
+ (void *)&cmd_config_tunnel_udp_port_port_id,
+ (void *)&cmd_config_tunnel_udp_port_tunnel_port,
+ (void *)&cmd_config_tunnel_udp_port_action,
+ (void *)&cmd_config_tunnel_udp_port_tunnel_type,
+ (void *)&cmd_config_tunnel_udp_port_value,
+ NULL,
+ },
+};
+
/* *** GLOBAL CONFIG *** */
struct cmd_global_config_result {
cmdline_fixed_string_t cmd;
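For reference, a sketch (not part of this patch) of the ethdev call that the new udp_tunnel_port command issues; the IANA-assigned VXLAN port 4789 is used as an example value:

/* Illustrative only: register UDP port 4789 as a VXLAN tunnel port,
 * i.e. what "port config <port> udp_tunnel_port add vxlan 4789" does.
 */
#include <rte_ethdev.h>

static int
add_vxlan_port_sketch(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}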
@@ -8621,7 +9086,7 @@ static void cmd_dump_parsed(void *parsed_result,
else if (!strcmp(res->dump, "dump_mempool"))
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
- rte_eal_devargs_dump(stdout);
+ rte_devargs_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -10725,11 +11190,6 @@ cmd_flow_director_mask_parsed(void *parsed_result,
struct rte_eth_fdir_masks *mask;
struct rte_port *port;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
port = &ports[res->port_id];
/** Check if the port is not started **/
if (port->port_status != RTE_PORT_STOPPED) {
@@ -10926,11 +11386,6 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
uint16_t i;
int ret;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
port = &ports[res->port_id];
/** Check if the port is not started **/
if (port->port_status != RTE_PORT_STOPPED) {
@@ -11082,11 +11537,6 @@ cmd_flow_director_flxpld_parsed(void *parsed_result,
struct rte_port *port;
int ret = 0;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
port = &ports[res->port_id];
/** Check if the port is not started **/
if (port->port_status != RTE_PORT_STOPPED) {
@@ -11728,7 +12178,7 @@ struct cmd_config_l2_tunnel_eth_type_result {
cmdline_fixed_string_t port;
cmdline_fixed_string_t config;
cmdline_fixed_string_t all;
- uint8_t id;
+ portid_t id;
cmdline_fixed_string_t l2_tunnel;
cmdline_fixed_string_t l2_tunnel_type;
cmdline_fixed_string_t eth_type;
@@ -11750,7 +12200,7 @@ cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_all_str =
cmdline_parse_token_num_t cmd_config_l2_tunnel_eth_type_id =
TOKEN_NUM_INITIALIZER
(struct cmd_config_l2_tunnel_eth_type_result,
- id, UINT8);
+ id, UINT16);
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_l2_tunnel =
TOKEN_STRING_INITIALIZER
(struct cmd_config_l2_tunnel_eth_type_result,
@@ -11863,7 +12313,7 @@ struct cmd_config_l2_tunnel_en_dis_result {
cmdline_fixed_string_t port;
cmdline_fixed_string_t config;
cmdline_fixed_string_t all;
- uint8_t id;
+ portid_t id;
cmdline_fixed_string_t l2_tunnel;
cmdline_fixed_string_t l2_tunnel_type;
cmdline_fixed_string_t en_dis;
@@ -11884,7 +12334,7 @@ cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_all_str =
cmdline_parse_token_num_t cmd_config_l2_tunnel_en_dis_id =
TOKEN_NUM_INITIALIZER
(struct cmd_config_l2_tunnel_en_dis_result,
- id, UINT8);
+ id, UINT16);
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_l2_tunnel =
TOKEN_STRING_INITIALIZER
(struct cmd_config_l2_tunnel_en_dis_result,
@@ -12861,7 +13311,7 @@ cmd_set_tx_loopback_parsed(
if (ret == -ENOTSUP)
ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on);
#endif
-#ifdef RTE_LIBRTE_DPAA_PMD
+#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD
if (ret == -ENOTSUP)
ret = rte_pmd_dpaa_set_tx_loopback(res->port_id, is_on);
#endif
@@ -14467,11 +14917,6 @@ cmd_ddp_add_parsed(
int file_num;
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -14549,11 +14994,6 @@ cmd_ddp_del_parsed(
uint32_t size;
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -14850,12 +15290,12 @@ cmdline_parse_token_num_t cmd_ddp_get_list_port_id =
static void
cmd_ddp_get_list_parsed(
- void *parsed_result,
+ __attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_ddp_get_list_result *res = parsed_result;
#ifdef RTE_LIBRTE_I40E_PMD
+ struct cmd_ddp_get_list_result *res = parsed_result;
struct rte_pmd_i40e_profile_list *p_list;
struct rte_pmd_i40e_profile_info *p_info;
uint32_t p_num;
@@ -14864,11 +15304,6 @@ cmd_ddp_get_list_parsed(
#endif
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
#ifdef RTE_LIBRTE_I40E_PMD
size = PROFILE_INFO_SIZE * MAX_PROFILE_NUM + 4;
p_list = (struct rte_pmd_i40e_profile_list *)malloc(size);
@@ -14931,22 +15366,17 @@ struct cmd_cfg_input_set_result {
static void
cmd_cfg_input_set_parsed(
- void *parsed_result,
+ __attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_cfg_input_set_result *res = parsed_result;
#ifdef RTE_LIBRTE_I40E_PMD
+ struct cmd_cfg_input_set_result *res = parsed_result;
enum rte_pmd_i40e_inset_type inset_type = INSET_NONE;
struct rte_pmd_i40e_inset inset;
#endif
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -15059,22 +15489,17 @@ struct cmd_clear_input_set_result {
static void
cmd_clear_input_set_parsed(
- void *parsed_result,
+ __attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_clear_input_set_result *res = parsed_result;
#ifdef RTE_LIBRTE_I40E_PMD
+ struct cmd_clear_input_set_result *res = parsed_result;
enum rte_pmd_i40e_inset_type inset_type = INSET_NONE;
struct rte_pmd_i40e_inset inset;
#endif
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -16030,6 +16455,805 @@ cmdline_parse_inst_t cmd_load_from_file = {
},
};
+/* Get Rx offloads capabilities */
+struct cmd_rx_offload_get_capa_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t capabilities;
+};
+
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_rx_offload_get_capa_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_rx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_capabilities =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ capabilities, "capabilities");
+
+static void
+print_rx_offloads(uint64_t offloads)
+{
+ uint64_t single_offload;
+ int begin;
+ int end;
+ int bit;
+
+ if (offloads == 0)
+ return;
+
+ begin = __builtin_ctzll(offloads);
+ end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads);
+
+ single_offload = 1ULL << begin;
+ for (bit = begin; bit < end; bit++) {
+ if (offloads & single_offload)
+ printf(" %s",
+ rte_eth_dev_rx_offload_name(single_offload));
+ single_offload <<= 1;
+ }
+}
+
+static void
+cmd_rx_offload_get_capa_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_rx_offload_get_capa_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint64_t queue_offloads;
+ uint64_t port_offloads;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ queue_offloads = dev_info.rx_queue_offload_capa;
+ port_offloads = dev_info.rx_offload_capa ^ queue_offloads;
+
+ printf("Rx Offloading Capabilities of port %d :\n", port_id);
+ printf(" Per Queue :");
+ print_rx_offloads(queue_offloads);
+
+ printf("\n");
+ printf(" Per Port :");
+ print_rx_offloads(port_offloads);
+ printf("\n\n");
+}
+
+cmdline_parse_inst_t cmd_rx_offload_get_capa = {
+ .f = cmd_rx_offload_get_capa_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> rx_offload capabilities",
+ .tokens = {
+ (void *)&cmd_rx_offload_get_capa_show,
+ (void *)&cmd_rx_offload_get_capa_port,
+ (void *)&cmd_rx_offload_get_capa_port_id,
+ (void *)&cmd_rx_offload_get_capa_rx_offload,
+ (void *)&cmd_rx_offload_get_capa_capabilities,
+ NULL,
+ }
+};
+
+/* Get Rx offloads configuration */
+struct cmd_rx_offload_get_configuration_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t configuration;
+};
+
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_rx_offload_get_configuration_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_rx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_configuration =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ configuration, "configuration");
+
+static void
+cmd_rx_offload_get_configuration_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_rx_offload_get_configuration_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t port_offloads;
+ uint64_t queue_offloads;
+ uint16_t nb_rx_queues;
+ int q;
+
+ printf("Rx Offloading Configuration of port %d :\n", port_id);
+
+ port_offloads = port->dev_conf.rxmode.offloads;
+ printf(" Port :");
+ print_rx_offloads(port_offloads);
+ printf("\n");
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_rx_queues = dev_info.nb_rx_queues;
+ for (q = 0; q < nb_rx_queues; q++) {
+ queue_offloads = port->rx_conf[q].offloads;
+ printf(" Queue[%2d] :", q);
+ print_rx_offloads(queue_offloads);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+cmdline_parse_inst_t cmd_rx_offload_get_configuration = {
+ .f = cmd_rx_offload_get_configuration_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> rx_offload configuration",
+ .tokens = {
+ (void *)&cmd_rx_offload_get_configuration_show,
+ (void *)&cmd_rx_offload_get_configuration_port,
+ (void *)&cmd_rx_offload_get_configuration_port_id,
+ (void *)&cmd_rx_offload_get_configuration_rx_offload,
+ (void *)&cmd_rx_offload_get_configuration_configuration,
+ NULL,
+ }
+};
+
+/* Enable/Disable a per port offloading */
+struct cmd_config_per_port_rx_offload_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_config =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_config_per_port_rx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_rx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
+ "qinq_strip#outer_ipv4_cksum#macsec_strip#"
+ "header_split#vlan_filter#vlan_extend#jumbo_frame#"
+ "crc_strip#scatter#timestamp#security");
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ on_off, "on#off");
+
+static uint64_t
+search_rx_offload(const char *name)
+{
+ uint64_t single_offload;
+ const char *single_name;
+ int found = 0;
+ unsigned int bit;
+
+ single_offload = 1;
+ for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) {
+ single_name = rte_eth_dev_rx_offload_name(single_offload);
+ if (!strcasecmp(single_name, name)) {
+ found = 1;
+ break;
+ } else if (!strcasecmp(single_name, "UNKNOWN"))
+ break;
+ else if (single_name == NULL)
+ break;
+ single_offload <<= 1;
+ }
+
+ if (found)
+ return single_offload;
+
+ return 0;
+}
+
+static void
+cmd_config_per_port_rx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_port_rx_offload_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ struct rte_eth_dev_info dev_info;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+ uint16_t nb_rx_queues;
+ int q;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ single_offload = search_rx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_rx_queues = dev_info.nb_rx_queues;
+ if (!strcmp(res->on_off, "on")) {
+ port->dev_conf.rxmode.offloads |= single_offload;
+ for (q = 0; q < nb_rx_queues; q++)
+ port->rx_conf[q].offloads |= single_offload;
+ } else {
+ port->dev_conf.rxmode.offloads &= ~single_offload;
+ for (q = 0; q < nb_rx_queues; q++)
+ port->rx_conf[q].offloads &= ~single_offload;
+ }
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_port_rx_offload = {
+ .f = cmd_config_per_port_rx_offload_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> rx_offload vlan_strip|ipv4_cksum|"
+ "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
+ "macsec_strip|header_split|vlan_filter|vlan_extend|"
+ "jumbo_frame|crc_strip|scatter|timestamp|security "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_port_rx_offload_result_port,
+ (void *)&cmd_config_per_port_rx_offload_result_config,
+ (void *)&cmd_config_per_port_rx_offload_result_port_id,
+ (void *)&cmd_config_per_port_rx_offload_result_rx_offload,
+ (void *)&cmd_config_per_port_rx_offload_result_offload,
+ (void *)&cmd_config_per_port_rx_offload_result_on_off,
+ NULL,
+ }
+};
+
+/* Enable/Disable a per queue offloading */
+struct cmd_config_per_queue_rx_offload_result {
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t rxq;
+ uint16_t queue_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxq =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ rxq, "rxq");
+cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_queue_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ queue_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxoffload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
+ "qinq_strip#outer_ipv4_cksum#macsec_strip#"
+ "header_split#vlan_filter#vlan_extend#jumbo_frame#"
+ "crc_strip#scatter#timestamp#security");
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ on_off, "on#off");
+
+static void
+cmd_config_per_queue_rx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_queue_rx_offload_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint16_t queue_id = res->queue_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (queue_id >= dev_info.nb_rx_queues) {
+ printf("Error: input queue_id should be 0 ... "
+ "%d\n", dev_info.nb_rx_queues - 1);
+ return;
+ }
+
+ single_offload = search_rx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ if (!strcmp(res->on_off, "on"))
+ port->rx_conf[queue_id].offloads |= single_offload;
+ else
+ port->rx_conf[queue_id].offloads &= ~single_offload;
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_queue_rx_offload = {
+ .f = cmd_config_per_queue_rx_offload_parsed,
+ .data = NULL,
+ .help_str = "port <port_id> rxq <queue_id> rx_offload "
+ "vlan_strip|ipv4_cksum|"
+ "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
+ "macsec_strip|header_split|vlan_filter|vlan_extend|"
+ "jumbo_frame|crc_strip|scatter|timestamp|security "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_queue_rx_offload_result_port,
+ (void *)&cmd_config_per_queue_rx_offload_result_port_id,
+ (void *)&cmd_config_per_queue_rx_offload_result_rxq,
+ (void *)&cmd_config_per_queue_rx_offload_result_queue_id,
+ (void *)&cmd_config_per_queue_rx_offload_result_rxoffload,
+ (void *)&cmd_config_per_queue_rx_offload_result_offload,
+ (void *)&cmd_config_per_queue_rx_offload_result_on_off,
+ NULL,
+ }
+};
+
+/* Get Tx offloads capabilities */
+struct cmd_tx_offload_get_capa_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t capabilities;
+};
+
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_tx_offload_get_capa_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_tx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_capabilities =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ capabilities, "capabilities");
+
+static void
+print_tx_offloads(uint64_t offloads)
+{
+ uint64_t single_offload;
+ int begin;
+ int end;
+ int bit;
+
+ if (offloads == 0)
+ return;
+
+ begin = __builtin_ctzll(offloads);
+ end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads);
+
+ single_offload = 1ULL << begin;
+ for (bit = begin; bit < end; bit++) {
+ if (offloads & single_offload)
+ printf(" %s",
+ rte_eth_dev_tx_offload_name(single_offload));
+ single_offload <<= 1;
+ }
+}
+
+static void
+cmd_tx_offload_get_capa_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tx_offload_get_capa_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint64_t queue_offloads;
+ uint64_t port_offloads;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ queue_offloads = dev_info.tx_queue_offload_capa;
+ port_offloads = dev_info.tx_offload_capa ^ queue_offloads;
+
+ printf("Tx Offloading Capabilities of port %d :\n", port_id);
+ printf(" Per Queue :");
+ print_tx_offloads(queue_offloads);
+
+ printf("\n");
+ printf(" Per Port :");
+ print_tx_offloads(port_offloads);
+ printf("\n\n");
+}
+
+cmdline_parse_inst_t cmd_tx_offload_get_capa = {
+ .f = cmd_tx_offload_get_capa_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> tx_offload capabilities",
+ .tokens = {
+ (void *)&cmd_tx_offload_get_capa_show,
+ (void *)&cmd_tx_offload_get_capa_port,
+ (void *)&cmd_tx_offload_get_capa_port_id,
+ (void *)&cmd_tx_offload_get_capa_tx_offload,
+ (void *)&cmd_tx_offload_get_capa_capabilities,
+ NULL,
+ }
+};
+
+/* Get Tx offloads configuration */
+struct cmd_tx_offload_get_configuration_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t configuration;
+};
+
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_tx_offload_get_configuration_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_tx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_configuration =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ configuration, "configuration");
+
+static void
+cmd_tx_offload_get_configuration_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tx_offload_get_configuration_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t port_offloads;
+ uint64_t queue_offloads;
+ uint16_t nb_tx_queues;
+ int q;
+
+ printf("Tx Offloading Configuration of port %d :\n", port_id);
+
+ port_offloads = port->dev_conf.txmode.offloads;
+ printf(" Port :");
+ print_tx_offloads(port_offloads);
+ printf("\n");
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_tx_queues = dev_info.nb_tx_queues;
+ for (q = 0; q < nb_tx_queues; q++) {
+ queue_offloads = port->tx_conf[q].offloads;
+ printf(" Queue[%2d] :", q);
+ print_tx_offloads(queue_offloads);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+cmdline_parse_inst_t cmd_tx_offload_get_configuration = {
+ .f = cmd_tx_offload_get_configuration_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> tx_offload configuration",
+ .tokens = {
+ (void *)&cmd_tx_offload_get_configuration_show,
+ (void *)&cmd_tx_offload_get_configuration_port,
+ (void *)&cmd_tx_offload_get_configuration_port_id,
+ (void *)&cmd_tx_offload_get_configuration_tx_offload,
+ (void *)&cmd_tx_offload_get_configuration_configuration,
+ NULL,
+ }
+};
+
+/* Enable/Disable a per port offloading */
+struct cmd_config_per_port_tx_offload_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_config =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_config_per_port_tx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_tx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ offload, "vlan_insert#ipv4_cksum#udp_cksum#udp_cksum#"
+ "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#"
+ "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#"
+ "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#"
+ "mt_lockfree#multi_segs#fast_free#security");
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ on_off, "on#off");
+
+static uint64_t
+search_tx_offload(const char *name)
+{
+ uint64_t single_offload;
+ const char *single_name;
+ int found = 0;
+ unsigned int bit;
+
+ single_offload = 1;
+ for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) {
+ single_name = rte_eth_dev_tx_offload_name(single_offload);
+ if (!strcasecmp(single_name, name)) {
+ found = 1;
+ break;
+ } else if (!strcasecmp(single_name, "UNKNOWN"))
+ break;
+ else if (single_name == NULL)
+ break;
+ single_offload <<= 1;
+ }
+
+ if (found)
+ return single_offload;
+
+ return 0;
+}
+
+static void
+cmd_config_per_port_tx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_port_tx_offload_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ struct rte_eth_dev_info dev_info;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+ uint16_t nb_tx_queues;
+ int q;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ single_offload = search_tx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_tx_queues = dev_info.nb_tx_queues;
+ if (!strcmp(res->on_off, "on")) {
+ port->dev_conf.txmode.offloads |= single_offload;
+ for (q = 0; q < nb_tx_queues; q++)
+ port->tx_conf[q].offloads |= single_offload;
+ } else {
+ port->dev_conf.txmode.offloads &= ~single_offload;
+ for (q = 0; q < nb_tx_queues; q++)
+ port->tx_conf[q].offloads &= ~single_offload;
+ }
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_port_tx_offload = {
+ .f = cmd_config_per_port_tx_offload_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> tx_offload "
+ "vlan_insert|ipv4_cksum|udp_cksum|udp_cksum|"
+ "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|"
+ "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|"
+ "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|"
+ "mt_lockfree|multi_segs|fast_free|security "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_port_tx_offload_result_port,
+ (void *)&cmd_config_per_port_tx_offload_result_config,
+ (void *)&cmd_config_per_port_tx_offload_result_port_id,
+ (void *)&cmd_config_per_port_tx_offload_result_tx_offload,
+ (void *)&cmd_config_per_port_tx_offload_result_offload,
+ (void *)&cmd_config_per_port_tx_offload_result_on_off,
+ NULL,
+ }
+};
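For reference, the per-port command defined above could be exercised from the testpmd prompt roughly as follows; the port number and offload name are illustrative, and the port must be stopped first (as the handler checks):

    testpmd> port stop 0
    testpmd> port config 0 tx_offload multi_segs on
    testpmd> port start 0

This sets the flag in the port-level txmode as well as in every queue's tx_conf, then requests a device/queue reconfiguration.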
+
+/* Enable/Disable a per queue offloading */
+struct cmd_config_per_queue_tx_offload_result {
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t txq;
+ uint16_t queue_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txq =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ txq, "txq");
+cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_queue_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ queue_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txoffload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ offload, "vlan_insert#ipv4_cksum#udp_cksum#udp_cksum#"
+ "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#"
+ "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#"
+ "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#"
+ "mt_lockfree#multi_segs#fast_free#security");
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ on_off, "on#off");
+
+static void
+cmd_config_per_queue_tx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_queue_tx_offload_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint16_t queue_id = res->queue_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (queue_id >= dev_info.nb_tx_queues) {
+ printf("Error: input queue_id should be 0 ... "
+ "%d\n", dev_info.nb_tx_queues - 1);
+ return;
+ }
+
+ single_offload = search_tx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ if (!strcmp(res->on_off, "on"))
+ port->tx_conf[queue_id].offloads |= single_offload;
+ else
+ port->tx_conf[queue_id].offloads &= ~single_offload;
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_queue_tx_offload = {
+ .f = cmd_config_per_queue_tx_offload_parsed,
+ .data = NULL,
+ .help_str = "port <port_id> txq <queue_id> tx_offload "
+ "vlan_insert|ipv4_cksum|udp_cksum|udp_cksum|"
+ "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|"
+ "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|"
+ "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|"
+ "mt_lockfree|multi_segs|fast_free|security "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_queue_tx_offload_result_port,
+ (void *)&cmd_config_per_queue_tx_offload_result_port_id,
+ (void *)&cmd_config_per_queue_tx_offload_result_txq,
+ (void *)&cmd_config_per_queue_tx_offload_result_queue_id,
+ (void *)&cmd_config_per_queue_tx_offload_result_txoffload,
+ (void *)&cmd_config_per_queue_tx_offload_result_offload,
+ (void *)&cmd_config_per_queue_tx_offload_result_on_off,
+ NULL,
+ }
+};
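The per-queue variant follows the same pattern; a sketch assuming a stopped port 0 with at least two Tx queues:

    testpmd> port 0 txq 1 tx_offload fast_free on

Unlike the per-port command, this only touches the tx_conf of the selected queue.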
+
/* ******************************************************************************** */
/* list of instructions */
@@ -16130,12 +17354,16 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_operate_detach_port,
(cmdline_parse_inst_t *)&cmd_config_speed_all,
(cmdline_parse_inst_t *)&cmd_config_speed_specific,
+ (cmdline_parse_inst_t *)&cmd_config_loopback_all,
+ (cmdline_parse_inst_t *)&cmd_config_loopback_specific,
(cmdline_parse_inst_t *)&cmd_config_rx_tx,
(cmdline_parse_inst_t *)&cmd_config_mtu,
(cmdline_parse_inst_t *)&cmd_config_max_pkt_len,
(cmdline_parse_inst_t *)&cmd_config_rx_mode_flag,
(cmdline_parse_inst_t *)&cmd_config_rss,
+ (cmdline_parse_inst_t *)&cmd_config_rxtx_ring_size,
(cmdline_parse_inst_t *)&cmd_config_rxtx_queue,
+ (cmdline_parse_inst_t *)&cmd_setup_rxtx_queue,
(cmdline_parse_inst_t *)&cmd_config_rss_reta,
(cmdline_parse_inst_t *)&cmd_showport_reta,
(cmdline_parse_inst_t *)&cmd_config_burst,
@@ -16271,7 +17499,22 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_add_port_tm_leaf_node,
(cmdline_parse_inst_t *)&cmd_del_port_tm_node,
(cmdline_parse_inst_t *)&cmd_set_port_tm_node_parent,
+ (cmdline_parse_inst_t *)&cmd_suspend_port_tm_node,
+ (cmdline_parse_inst_t *)&cmd_resume_port_tm_node,
(cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit,
+ (cmdline_parse_inst_t *)&cmd_cfg_tunnel_udp_port,
+ (cmdline_parse_inst_t *)&cmd_rx_offload_get_capa,
+ (cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration,
+ (cmdline_parse_inst_t *)&cmd_config_per_port_rx_offload,
+ (cmdline_parse_inst_t *)&cmd_config_per_queue_rx_offload,
+ (cmdline_parse_inst_t *)&cmd_tx_offload_get_capa,
+ (cmdline_parse_inst_t *)&cmd_tx_offload_get_configuration,
+ (cmdline_parse_inst_t *)&cmd_config_per_port_tx_offload,
+ (cmdline_parse_inst_t *)&cmd_config_per_queue_tx_offload,
+#ifdef RTE_LIBRTE_BPF
+ (cmdline_parse_inst_t *)&cmd_operate_bpf_ld_parse,
+ (cmdline_parse_inst_t *)&cmd_operate_bpf_unld_parse,
+#endif
NULL,
};
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a5cf84f7..9918d7fd 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -14,6 +14,7 @@
#include <sys/socket.h>
#include <rte_common.h>
+#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <cmdline_parse.h>
@@ -68,6 +69,7 @@ enum index {
PRIORITY,
INGRESS,
EGRESS,
+ TRANSFER,
/* Validate/create pattern. */
PATTERN,
@@ -85,8 +87,12 @@ enum index {
ITEM_PF,
ITEM_VF,
ITEM_VF_ID,
- ITEM_PORT,
- ITEM_PORT_INDEX,
+ ITEM_PHY_PORT,
+ ITEM_PHY_PORT_INDEX,
+ ITEM_PORT_ID,
+ ITEM_PORT_ID_ID,
+ ITEM_MARK,
+ ITEM_MARK_ID,
ITEM_RAW,
ITEM_RAW_RELATIVE,
ITEM_RAW_SEARCH,
@@ -98,11 +104,11 @@ enum index {
ITEM_ETH_SRC,
ITEM_ETH_TYPE,
ITEM_VLAN,
- ITEM_VLAN_TPID,
ITEM_VLAN_TCI,
ITEM_VLAN_PCP,
ITEM_VLAN_DEI,
ITEM_VLAN_VID,
+ ITEM_VLAN_INNER_TYPE,
ITEM_IPV4,
ITEM_IPV4_TOS,
ITEM_IPV4_TTL,
@@ -150,6 +156,28 @@ enum index {
ITEM_GENEVE,
ITEM_GENEVE_VNI,
ITEM_GENEVE_PROTO,
+ ITEM_VXLAN_GPE,
+ ITEM_VXLAN_GPE_VNI,
+ ITEM_ARP_ETH_IPV4,
+ ITEM_ARP_ETH_IPV4_SHA,
+ ITEM_ARP_ETH_IPV4_SPA,
+ ITEM_ARP_ETH_IPV4_THA,
+ ITEM_ARP_ETH_IPV4_TPA,
+ ITEM_IPV6_EXT,
+ ITEM_IPV6_EXT_NEXT_HDR,
+ ITEM_ICMP6,
+ ITEM_ICMP6_TYPE,
+ ITEM_ICMP6_CODE,
+ ITEM_ICMP6_ND_NS,
+ ITEM_ICMP6_ND_NS_TARGET_ADDR,
+ ITEM_ICMP6_ND_NA,
+ ITEM_ICMP6_ND_NA_TARGET_ADDR,
+ ITEM_ICMP6_ND_OPT,
+ ITEM_ICMP6_ND_OPT_TYPE,
+ ITEM_ICMP6_ND_OPT_SLA_ETH,
+ ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
+ ITEM_ICMP6_ND_OPT_TLA_ETH,
+ ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
/* Validate/create actions. */
ACTIONS,
@@ -157,6 +185,8 @@ enum index {
ACTION_END,
ACTION_VOID,
ACTION_PASSTHRU,
+ ACTION_JUMP,
+ ACTION_JUMP_GROUP,
ACTION_MARK,
ACTION_MARK_ID,
ACTION_FLAG,
@@ -164,33 +194,67 @@ enum index {
ACTION_QUEUE_INDEX,
ACTION_DROP,
ACTION_COUNT,
- ACTION_DUP,
- ACTION_DUP_INDEX,
ACTION_RSS,
+ ACTION_RSS_FUNC,
+ ACTION_RSS_LEVEL,
+ ACTION_RSS_FUNC_DEFAULT,
+ ACTION_RSS_FUNC_TOEPLITZ,
+ ACTION_RSS_FUNC_SIMPLE_XOR,
+ ACTION_RSS_TYPES,
+ ACTION_RSS_TYPE,
+ ACTION_RSS_KEY,
+ ACTION_RSS_KEY_LEN,
ACTION_RSS_QUEUES,
ACTION_RSS_QUEUE,
ACTION_PF,
ACTION_VF,
ACTION_VF_ORIGINAL,
ACTION_VF_ID,
+ ACTION_PHY_PORT,
+ ACTION_PHY_PORT_ORIGINAL,
+ ACTION_PHY_PORT_INDEX,
+ ACTION_PORT_ID,
+ ACTION_PORT_ID_ORIGINAL,
+ ACTION_PORT_ID_ID,
ACTION_METER,
ACTION_METER_ID,
+ ACTION_OF_SET_MPLS_TTL,
+ ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
+ ACTION_OF_DEC_MPLS_TTL,
+ ACTION_OF_SET_NW_TTL,
+ ACTION_OF_SET_NW_TTL_NW_TTL,
+ ACTION_OF_DEC_NW_TTL,
+ ACTION_OF_COPY_TTL_OUT,
+ ACTION_OF_COPY_TTL_IN,
+ ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN,
+ ACTION_OF_PUSH_VLAN_ETHERTYPE,
+ ACTION_OF_SET_VLAN_VID,
+ ACTION_OF_SET_VLAN_VID_VLAN_VID,
+ ACTION_OF_SET_VLAN_PCP,
+ ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
+ ACTION_OF_POP_MPLS,
+ ACTION_OF_POP_MPLS_ETHERTYPE,
+ ACTION_OF_PUSH_MPLS,
+ ACTION_OF_PUSH_MPLS_ETHERTYPE,
};
-/** Size of pattern[] field in struct rte_flow_item_raw. */
-#define ITEM_RAW_PATTERN_SIZE 36
+/** Maximum size for pattern in struct rte_flow_item_raw. */
+#define ITEM_RAW_PATTERN_SIZE 40
/** Storage size for struct rte_flow_item_raw including pattern. */
#define ITEM_RAW_SIZE \
- (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
+ (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
-/** Number of queue[] entries in struct rte_flow_action_rss. */
-#define ACTION_RSS_NUM 32
+/** Maximum number of queue indices in struct rte_flow_action_rss. */
+#define ACTION_RSS_QUEUE_NUM 32
-/** Storage size for struct rte_flow_action_rss including queues. */
-#define ACTION_RSS_SIZE \
- (offsetof(struct rte_flow_action_rss, queue) + \
- sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
+/** Storage for struct rte_flow_action_rss including external data. */
+struct action_rss_data {
+ struct rte_flow_action_rss conf;
+ uint8_t key[RSS_HASH_KEY_LENGTH];
+ uint16_t queue[ACTION_RSS_QUEUE_NUM];
+};
/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
@@ -217,6 +281,9 @@ struct context {
struct arg {
uint32_t hton:1; /**< Use network byte ordering. */
uint32_t sign:1; /**< Value is signed. */
+ uint32_t bounded:1; /**< Value is bounded. */
+ uintmax_t min; /**< Minimum value if bounded. */
+ uintmax_t max; /**< Maximum value if bounded. */
uint32_t offset; /**< Relative offset from ctx->object. */
uint32_t size; /**< Field size. */
const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
@@ -309,11 +376,21 @@ struct token {
.size = sizeof(*((s *)0)->f), \
})
-/** Static initializer for ARGS() with arbitrary size. */
-#define ARGS_ENTRY_USZ(s, f, sz) \
+/** Static initializer for ARGS() with arbitrary offset and size. */
+#define ARGS_ENTRY_ARB(o, s) \
(&(const struct arg){ \
- .offset = offsetof(s, f), \
- .size = (sz), \
+ .offset = (o), \
+ .size = (s), \
+ })
+
+/** Same as ARGS_ENTRY_ARB() with bounded values. */
+#define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
+ (&(const struct arg){ \
+ .bounded = 1, \
+ .min = (i), \
+ .max = (a), \
+ .offset = (o), \
+ .size = (s), \
})
/** Same as ARGS_ENTRY() using network byte ordering. */
@@ -343,7 +420,7 @@ struct buffer {
} destroy; /**< Destroy arguments. */
struct {
uint32_t rule;
- enum rte_flow_action_type action;
+ struct rte_flow_action action;
} query; /**< Query arguments. */
struct {
uint32_t *group;
@@ -384,6 +461,7 @@ static const enum index next_vc_attr[] = {
PRIORITY,
INGRESS,
EGRESS,
+ TRANSFER,
PATTERN,
ZERO,
};
@@ -416,7 +494,9 @@ static const enum index next_item[] = {
ITEM_ANY,
ITEM_PF,
ITEM_VF,
- ITEM_PORT,
+ ITEM_PHY_PORT,
+ ITEM_PORT_ID,
+ ITEM_MARK,
ITEM_RAW,
ITEM_ETH,
ITEM_VLAN,
@@ -436,6 +516,15 @@ static const enum index next_item[] = {
ITEM_GTPC,
ITEM_GTPU,
ITEM_GENEVE,
+ ITEM_VXLAN_GPE,
+ ITEM_ARP_ETH_IPV4,
+ ITEM_IPV6_EXT,
+ ITEM_ICMP6,
+ ITEM_ICMP6_ND_NS,
+ ITEM_ICMP6_ND_NA,
+ ITEM_ICMP6_ND_OPT,
+ ITEM_ICMP6_ND_OPT_SLA_ETH,
+ ITEM_ICMP6_ND_OPT_TLA_ETH,
ZERO,
};
@@ -457,8 +546,20 @@ static const enum index item_vf[] = {
ZERO,
};
-static const enum index item_port[] = {
- ITEM_PORT_INDEX,
+static const enum index item_phy_port[] = {
+ ITEM_PHY_PORT_INDEX,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_port_id[] = {
+ ITEM_PORT_ID_ID,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_mark[] = {
+ ITEM_MARK_ID,
ITEM_NEXT,
ZERO,
};
@@ -482,11 +583,11 @@ static const enum index item_eth[] = {
};
static const enum index item_vlan[] = {
- ITEM_VLAN_TPID,
ITEM_VLAN_TCI,
ITEM_VLAN_PCP,
ITEM_VLAN_DEI,
ITEM_VLAN_VID,
+ ITEM_VLAN_INNER_TYPE,
ITEM_NEXT,
ZERO,
};
@@ -586,20 +687,92 @@ static const enum index item_geneve[] = {
ZERO,
};
+static const enum index item_vxlan_gpe[] = {
+ ITEM_VXLAN_GPE_VNI,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_arp_eth_ipv4[] = {
+ ITEM_ARP_ETH_IPV4_SHA,
+ ITEM_ARP_ETH_IPV4_SPA,
+ ITEM_ARP_ETH_IPV4_THA,
+ ITEM_ARP_ETH_IPV4_TPA,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_ipv6_ext[] = {
+ ITEM_IPV6_EXT_NEXT_HDR,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6[] = {
+ ITEM_ICMP6_TYPE,
+ ITEM_ICMP6_CODE,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_ns[] = {
+ ITEM_ICMP6_ND_NS_TARGET_ADDR,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_na[] = {
+ ITEM_ICMP6_ND_NA_TARGET_ADDR,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_opt[] = {
+ ITEM_ICMP6_ND_OPT_TYPE,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_opt_sla_eth[] = {
+ ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_opt_tla_eth[] = {
+ ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
+ ITEM_NEXT,
+ ZERO,
+};
+
static const enum index next_action[] = {
ACTION_END,
ACTION_VOID,
ACTION_PASSTHRU,
+ ACTION_JUMP,
ACTION_MARK,
ACTION_FLAG,
ACTION_QUEUE,
ACTION_DROP,
ACTION_COUNT,
- ACTION_DUP,
ACTION_RSS,
ACTION_PF,
ACTION_VF,
+ ACTION_PHY_PORT,
+ ACTION_PORT_ID,
ACTION_METER,
+ ACTION_OF_SET_MPLS_TTL,
+ ACTION_OF_DEC_MPLS_TTL,
+ ACTION_OF_SET_NW_TTL,
+ ACTION_OF_DEC_NW_TTL,
+ ACTION_OF_COPY_TTL_OUT,
+ ACTION_OF_COPY_TTL_IN,
+ ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN,
+ ACTION_OF_SET_VLAN_VID,
+ ACTION_OF_SET_VLAN_PCP,
+ ACTION_OF_POP_MPLS,
+ ACTION_OF_PUSH_MPLS,
ZERO,
};
@@ -615,13 +788,12 @@ static const enum index action_queue[] = {
ZERO,
};
-static const enum index action_dup[] = {
- ACTION_DUP_INDEX,
- ACTION_NEXT,
- ZERO,
-};
-
static const enum index action_rss[] = {
+ ACTION_RSS_FUNC,
+ ACTION_RSS_LEVEL,
+ ACTION_RSS_TYPES,
+ ACTION_RSS_KEY,
+ ACTION_RSS_KEY_LEN,
ACTION_RSS_QUEUES,
ACTION_NEXT,
ZERO,
@@ -634,12 +806,74 @@ static const enum index action_vf[] = {
ZERO,
};
+static const enum index action_phy_port[] = {
+ ACTION_PHY_PORT_ORIGINAL,
+ ACTION_PHY_PORT_INDEX,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_port_id[] = {
+ ACTION_PORT_ID_ORIGINAL,
+ ACTION_PORT_ID_ID,
+ ACTION_NEXT,
+ ZERO,
+};
+
static const enum index action_meter[] = {
ACTION_METER_ID,
ACTION_NEXT,
ZERO,
};
+static const enum index action_of_set_mpls_ttl[] = {
+ ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_set_nw_ttl[] = {
+ ACTION_OF_SET_NW_TTL_NW_TTL,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_push_vlan[] = {
+ ACTION_OF_PUSH_VLAN_ETHERTYPE,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_set_vlan_vid[] = {
+ ACTION_OF_SET_VLAN_VID_VLAN_VID,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_set_vlan_pcp[] = {
+ ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_pop_mpls[] = {
+ ACTION_OF_POP_MPLS_ETHERTYPE,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_push_mpls[] = {
+ ACTION_OF_PUSH_MPLS_ETHERTYPE,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_jump[] = {
+ ACTION_JUMP_GROUP,
+ ACTION_NEXT,
+ ZERO,
+};
+
static int parse_init(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -650,6 +884,15 @@ static int parse_vc_spec(struct context *, const struct token *,
const char *, unsigned int, void *, unsigned int);
static int parse_vc_conf(struct context *, const struct token *,
const char *, unsigned int, void *, unsigned int);
+static int parse_vc_action_rss(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_rss_func(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_rss_type(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
static int parse_vc_action_rss_queue(struct context *, const struct token *,
const char *, unsigned int, void *,
unsigned int);
@@ -705,6 +948,8 @@ static int comp_port(struct context *, const struct token *,
unsigned int, char *, unsigned int);
static int comp_rule_id(struct context *, const struct token *,
unsigned int, char *, unsigned int);
+static int comp_vc_action_rss_type(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
static int comp_vc_action_rss_queue(struct context *, const struct token *,
unsigned int, char *, unsigned int);
@@ -856,7 +1101,7 @@ static const struct token token_list[] = {
.next = NEXT(NEXT_ENTRY(QUERY_ACTION),
NEXT_ENTRY(RULE_ID),
NEXT_ENTRY(PORT_ID)),
- .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
ARGS_ENTRY(struct buffer, args.query.rule),
ARGS_ENTRY(struct buffer, port)),
.call = parse_query,
@@ -928,6 +1173,12 @@ static const struct token token_list[] = {
.next = NEXT(next_vc_attr),
.call = parse_vc,
},
+ [TRANSFER] = {
+ .name = "transfer",
+ .help = "apply rule directly to endpoints found in pattern",
+ .next = NEXT(next_vc_attr),
+ .call = parse_vc,
+ },
/* Validate/create pattern. */
[PATTERN] = {
.name = "pattern",
@@ -1001,36 +1252,64 @@ static const struct token token_list[] = {
},
[ITEM_PF] = {
.name = "pf",
- .help = "match packets addressed to the physical function",
+ .help = "match traffic from/to the physical function",
.priv = PRIV_ITEM(PF, 0),
.next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
.call = parse_vc,
},
[ITEM_VF] = {
.name = "vf",
- .help = "match packets addressed to a virtual function ID",
+ .help = "match traffic from/to a virtual function ID",
.priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
.next = NEXT(item_vf),
.call = parse_vc,
},
[ITEM_VF_ID] = {
.name = "id",
- .help = "destination VF ID",
+ .help = "VF ID",
.next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
.args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
},
- [ITEM_PORT] = {
- .name = "port",
- .help = "device-specific physical port index to use",
- .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
- .next = NEXT(item_port),
+ [ITEM_PHY_PORT] = {
+ .name = "phy_port",
+ .help = "match traffic from/to a specific physical port",
+ .priv = PRIV_ITEM(PHY_PORT,
+ sizeof(struct rte_flow_item_phy_port)),
+ .next = NEXT(item_phy_port),
.call = parse_vc,
},
- [ITEM_PORT_INDEX] = {
+ [ITEM_PHY_PORT_INDEX] = {
.name = "index",
.help = "physical port index",
- .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
- .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
+ .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
+ },
+ [ITEM_PORT_ID] = {
+ .name = "port_id",
+ .help = "match traffic from/to a given DPDK port ID",
+ .priv = PRIV_ITEM(PORT_ID,
+ sizeof(struct rte_flow_item_port_id)),
+ .next = NEXT(item_port_id),
+ .call = parse_vc,
+ },
+ [ITEM_PORT_ID_ID] = {
+ .name = "id",
+ .help = "DPDK port ID",
+ .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
+ },
+ [ITEM_MARK] = {
+ .name = "mark",
+ .help = "match traffic against value set in previously matched rule",
+ .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
+ .next = NEXT(item_mark),
+ .call = parse_vc,
+ },
+ [ITEM_MARK_ID] = {
+ .name = "id",
+ .help = "Integer value to match against",
+ .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
},
[ITEM_RAW] = {
.name = "raw",
@@ -1073,9 +1352,9 @@ static const struct token token_list[] = {
NEXT_ENTRY(ITEM_PARAM_IS,
ITEM_PARAM_SPEC,
ITEM_PARAM_MASK)),
- .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
- ARGS_ENTRY_USZ(struct rte_flow_item_raw,
- pattern,
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
+ ARGS_ENTRY(struct rte_flow_item_raw, length),
+ ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
ITEM_RAW_PATTERN_SIZE)),
},
[ITEM_ETH] = {
@@ -1110,12 +1389,6 @@ static const struct token token_list[] = {
.next = NEXT(item_vlan),
.call = parse_vc,
},
- [ITEM_VLAN_TPID] = {
- .name = "tpid",
- .help = "tag protocol identifier",
- .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
- .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
- },
[ITEM_VLAN_TCI] = {
.name = "tci",
.help = "tag control information",
@@ -1143,6 +1416,13 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
tci, "\x0f\xff")),
},
+ [ITEM_VLAN_INNER_TYPE] = {
+ .name = "inner_type",
+ .help = "inner EtherType",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
+ inner_type)),
+ },
[ITEM_IPV4] = {
.name = "ipv4",
.help = "match IPv4 header",
@@ -1473,6 +1753,182 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
protocol)),
},
+ [ITEM_VXLAN_GPE] = {
+ .name = "vxlan-gpe",
+ .help = "match VXLAN-GPE header",
+ .priv = PRIV_ITEM(VXLAN_GPE,
+ sizeof(struct rte_flow_item_vxlan_gpe)),
+ .next = NEXT(item_vxlan_gpe),
+ .call = parse_vc,
+ },
+ [ITEM_VXLAN_GPE_VNI] = {
+ .name = "vni",
+ .help = "VXLAN-GPE identifier",
+ .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
+ vni)),
+ },
+ [ITEM_ARP_ETH_IPV4] = {
+ .name = "arp_eth_ipv4",
+ .help = "match ARP header for Ethernet/IPv4",
+ .priv = PRIV_ITEM(ARP_ETH_IPV4,
+ sizeof(struct rte_flow_item_arp_eth_ipv4)),
+ .next = NEXT(item_arp_eth_ipv4),
+ .call = parse_vc,
+ },
+ [ITEM_ARP_ETH_IPV4_SHA] = {
+ .name = "sha",
+ .help = "sender hardware address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ sha)),
+ },
+ [ITEM_ARP_ETH_IPV4_SPA] = {
+ .name = "spa",
+ .help = "sender IPv4 address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ spa)),
+ },
+ [ITEM_ARP_ETH_IPV4_THA] = {
+ .name = "tha",
+ .help = "target hardware address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ tha)),
+ },
+ [ITEM_ARP_ETH_IPV4_TPA] = {
+ .name = "tpa",
+ .help = "target IPv4 address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ tpa)),
+ },
+ [ITEM_IPV6_EXT] = {
+ .name = "ipv6_ext",
+ .help = "match presence of any IPv6 extension header",
+ .priv = PRIV_ITEM(IPV6_EXT,
+ sizeof(struct rte_flow_item_ipv6_ext)),
+ .next = NEXT(item_ipv6_ext),
+ .call = parse_vc,
+ },
+ [ITEM_IPV6_EXT_NEXT_HDR] = {
+ .name = "next_hdr",
+ .help = "next header",
+ .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
+ next_hdr)),
+ },
+ [ITEM_ICMP6] = {
+ .name = "icmp6",
+ .help = "match any ICMPv6 header",
+ .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
+ .next = NEXT(item_icmp6),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_TYPE] = {
+ .name = "type",
+ .help = "ICMPv6 type",
+ .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
+ type)),
+ },
+ [ITEM_ICMP6_CODE] = {
+ .name = "code",
+ .help = "ICMPv6 code",
+ .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
+ code)),
+ },
+ [ITEM_ICMP6_ND_NS] = {
+ .name = "icmp6_nd_ns",
+ .help = "match ICMPv6 neighbor discovery solicitation",
+ .priv = PRIV_ITEM(ICMP6_ND_NS,
+ sizeof(struct rte_flow_item_icmp6_nd_ns)),
+ .next = NEXT(item_icmp6_nd_ns),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
+ .name = "target_addr",
+ .help = "target address",
+ .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
+ target_addr)),
+ },
+ [ITEM_ICMP6_ND_NA] = {
+ .name = "icmp6_nd_na",
+ .help = "match ICMPv6 neighbor discovery advertisement",
+ .priv = PRIV_ITEM(ICMP6_ND_NA,
+ sizeof(struct rte_flow_item_icmp6_nd_na)),
+ .next = NEXT(item_icmp6_nd_na),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
+ .name = "target_addr",
+ .help = "target address",
+ .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
+ target_addr)),
+ },
+ [ITEM_ICMP6_ND_OPT] = {
+ .name = "icmp6_nd_opt",
+ .help = "match presence of any ICMPv6 neighbor discovery"
+ " option",
+ .priv = PRIV_ITEM(ICMP6_ND_OPT,
+ sizeof(struct rte_flow_item_icmp6_nd_opt)),
+ .next = NEXT(item_icmp6_nd_opt),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_OPT_TYPE] = {
+ .name = "type",
+ .help = "ND option type",
+ .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
+ type)),
+ },
+ [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
+ .name = "icmp6_nd_opt_sla_eth",
+ .help = "match ICMPv6 neighbor discovery source Ethernet"
+ " link-layer address option",
+ .priv = PRIV_ITEM
+ (ICMP6_ND_OPT_SLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
+ .next = NEXT(item_icmp6_nd_opt_sla_eth),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
+ .name = "sla",
+ .help = "source Ethernet LLA",
+ .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
+ },
+ [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
+ .name = "icmp6_nd_opt_tla_eth",
+ .help = "match ICMPv6 neighbor discovery target Ethernet"
+ " link-layer address option",
+ .priv = PRIV_ITEM
+ (ICMP6_ND_OPT_TLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
+ .next = NEXT(item_icmp6_nd_opt_tla_eth),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
+ .name = "tla",
+ .help = "target Ethernet LLA",
+ .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
+ },
/* Validate/create actions. */
[ACTIONS] = {
@@ -1506,6 +1962,20 @@ static const struct token token_list[] = {
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc,
},
+ [ACTION_JUMP] = {
+ .name = "jump",
+ .help = "redirect traffic to a given group",
+ .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
+ .next = NEXT(action_jump),
+ .call = parse_vc,
+ },
+ [ACTION_JUMP_GROUP] = {
+ .name = "group",
+ .help = "group to redirect traffic to",
+ .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
+ .call = parse_vc_conf,
+ },
[ACTION_MARK] = {
.name = "mark",
.help = "attach 32 bit value to packets",
@@ -1556,26 +2026,80 @@ static const struct token token_list[] = {
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc,
},
- [ACTION_DUP] = {
- .name = "dup",
- .help = "duplicate packets to a given queue index",
- .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
- .next = NEXT(action_dup),
- .call = parse_vc,
- },
- [ACTION_DUP_INDEX] = {
- .name = "index",
- .help = "queue index to duplicate packets to",
- .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
- .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
- .call = parse_vc_conf,
- },
[ACTION_RSS] = {
.name = "rss",
.help = "spread packets among several queues",
- .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
+ .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
.next = NEXT(action_rss),
- .call = parse_vc,
+ .call = parse_vc_action_rss,
+ },
+ [ACTION_RSS_FUNC] = {
+ .name = "func",
+ .help = "RSS hash function to apply",
+ .next = NEXT(action_rss,
+ NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
+ ACTION_RSS_FUNC_TOEPLITZ,
+ ACTION_RSS_FUNC_SIMPLE_XOR)),
+ },
+ [ACTION_RSS_FUNC_DEFAULT] = {
+ .name = "default",
+ .help = "default hash function",
+ .call = parse_vc_action_rss_func,
+ },
+ [ACTION_RSS_FUNC_TOEPLITZ] = {
+ .name = "toeplitz",
+ .help = "Toeplitz hash function",
+ .call = parse_vc_action_rss_func,
+ },
+ [ACTION_RSS_FUNC_SIMPLE_XOR] = {
+ .name = "simple_xor",
+ .help = "simple XOR hash function",
+ .call = parse_vc_action_rss_func,
+ },
+ [ACTION_RSS_LEVEL] = {
+ .name = "level",
+ .help = "encapsulation level for \"types\"",
+ .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_ARB
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, level),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ level))),
+ },
+ [ACTION_RSS_TYPES] = {
+ .name = "types",
+ .help = "specific RSS hash types",
+ .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
+ },
+ [ACTION_RSS_TYPE] = {
+ .name = "{type}",
+ .help = "RSS hash type",
+ .call = parse_vc_action_rss_type,
+ .comp = comp_vc_action_rss_type,
+ },
+ [ACTION_RSS_KEY] = {
+ .name = "key",
+ .help = "RSS hash key",
+ .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
+ .args = ARGS(ARGS_ENTRY_ARB(0, 0),
+ ARGS_ENTRY_ARB
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, key_len),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ key_len)),
+ ARGS_ENTRY(struct action_rss_data, key)),
+ },
+ [ACTION_RSS_KEY_LEN] = {
+ .name = "key_len",
+ .help = "RSS hash key length in bytes",
+ .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, key_len),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ key_len),
+ 0,
+ RSS_HASH_KEY_LENGTH)),
},
[ACTION_RSS_QUEUES] = {
.name = "queues",
@@ -1591,14 +2115,14 @@ static const struct token token_list[] = {
},
[ACTION_PF] = {
.name = "pf",
- .help = "redirect packets to physical device function",
+ .help = "direct traffic to physical function",
.priv = PRIV_ACTION(PF, 0),
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc,
},
[ACTION_VF] = {
.name = "vf",
- .help = "redirect packets to virtual device function",
+ .help = "direct traffic to a virtual function ID",
.priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
.next = NEXT(action_vf),
.call = parse_vc,
@@ -1613,11 +2137,58 @@ static const struct token token_list[] = {
},
[ACTION_VF_ID] = {
.name = "id",
- .help = "VF ID to redirect packets to",
+ .help = "VF ID",
.next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
.args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
.call = parse_vc_conf,
},
+ [ACTION_PHY_PORT] = {
+ .name = "phy_port",
+ .help = "direct packets to physical port index",
+ .priv = PRIV_ACTION(PHY_PORT,
+ sizeof(struct rte_flow_action_phy_port)),
+ .next = NEXT(action_phy_port),
+ .call = parse_vc,
+ },
+ [ACTION_PHY_PORT_ORIGINAL] = {
+ .name = "original",
+ .help = "use original port index if possible",
+ .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
+ original, 1)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_PHY_PORT_INDEX] = {
+ .name = "index",
+ .help = "physical port index",
+ .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
+ index)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_PORT_ID] = {
+ .name = "port_id",
+ .help = "direct matching traffic to a given DPDK port ID",
+ .priv = PRIV_ACTION(PORT_ID,
+ sizeof(struct rte_flow_action_port_id)),
+ .next = NEXT(action_port_id),
+ .call = parse_vc,
+ },
+ [ACTION_PORT_ID_ORIGINAL] = {
+ .name = "original",
+ .help = "use original DPDK port ID if possible",
+ .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
+ original, 1)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_PORT_ID_ID] = {
+ .name = "id",
+ .help = "DPDK port ID",
+ .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
+ .call = parse_vc_conf,
+ },
[ACTION_METER] = {
.name = "meter",
.help = "meter the directed packets at given id",
@@ -1633,6 +2204,164 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
.call = parse_vc_conf,
},
+ [ACTION_OF_SET_MPLS_TTL] = {
+ .name = "of_set_mpls_ttl",
+ .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
+ .priv = PRIV_ACTION
+ (OF_SET_MPLS_TTL,
+ sizeof(struct rte_flow_action_of_set_mpls_ttl)),
+ .next = NEXT(action_of_set_mpls_ttl),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
+ .name = "mpls_ttl",
+ .help = "MPLS TTL",
+ .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
+ mpls_ttl)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_DEC_MPLS_TTL] = {
+ .name = "of_dec_mpls_ttl",
+ .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
+ .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_NW_TTL] = {
+ .name = "of_set_nw_ttl",
+ .help = "OpenFlow's OFPAT_SET_NW_TTL",
+ .priv = PRIV_ACTION
+ (OF_SET_NW_TTL,
+ sizeof(struct rte_flow_action_of_set_nw_ttl)),
+ .next = NEXT(action_of_set_nw_ttl),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_NW_TTL_NW_TTL] = {
+ .name = "nw_ttl",
+ .help = "IP TTL",
+ .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
+ nw_ttl)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_DEC_NW_TTL] = {
+ .name = "of_dec_nw_ttl",
+ .help = "OpenFlow's OFPAT_DEC_NW_TTL",
+ .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_COPY_TTL_OUT] = {
+ .name = "of_copy_ttl_out",
+ .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
+ .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_COPY_TTL_IN] = {
+ .name = "of_copy_ttl_in",
+ .help = "OpenFlow's OFPAT_COPY_TTL_IN",
+ .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_POP_VLAN] = {
+ .name = "of_pop_vlan",
+ .help = "OpenFlow's OFPAT_POP_VLAN",
+ .priv = PRIV_ACTION(OF_POP_VLAN, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_PUSH_VLAN] = {
+ .name = "of_push_vlan",
+ .help = "OpenFlow's OFPAT_PUSH_VLAN",
+ .priv = PRIV_ACTION
+ (OF_PUSH_VLAN,
+ sizeof(struct rte_flow_action_of_push_vlan)),
+ .next = NEXT(action_of_push_vlan),
+ .call = parse_vc,
+ },
+ [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
+ .name = "ethertype",
+ .help = "EtherType",
+ .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_push_vlan,
+ ethertype)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_SET_VLAN_VID] = {
+ .name = "of_set_vlan_vid",
+ .help = "OpenFlow's OFPAT_SET_VLAN_VID",
+ .priv = PRIV_ACTION
+ (OF_SET_VLAN_VID,
+ sizeof(struct rte_flow_action_of_set_vlan_vid)),
+ .next = NEXT(action_of_set_vlan_vid),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
+ .name = "vlan_vid",
+ .help = "VLAN id",
+ .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_set_vlan_vid,
+ vlan_vid)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_SET_VLAN_PCP] = {
+ .name = "of_set_vlan_pcp",
+ .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
+ .priv = PRIV_ACTION
+ (OF_SET_VLAN_PCP,
+ sizeof(struct rte_flow_action_of_set_vlan_pcp)),
+ .next = NEXT(action_of_set_vlan_pcp),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
+ .name = "vlan_pcp",
+ .help = "VLAN priority",
+ .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_set_vlan_pcp,
+ vlan_pcp)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_POP_MPLS] = {
+ .name = "of_pop_mpls",
+ .help = "OpenFlow's OFPAT_POP_MPLS",
+ .priv = PRIV_ACTION(OF_POP_MPLS,
+ sizeof(struct rte_flow_action_of_pop_mpls)),
+ .next = NEXT(action_of_pop_mpls),
+ .call = parse_vc,
+ },
+ [ACTION_OF_POP_MPLS_ETHERTYPE] = {
+ .name = "ethertype",
+ .help = "EtherType",
+ .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_pop_mpls,
+ ethertype)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_PUSH_MPLS] = {
+ .name = "of_push_mpls",
+ .help = "OpenFlow's OFPAT_PUSH_MPLS",
+ .priv = PRIV_ACTION
+ (OF_PUSH_MPLS,
+ sizeof(struct rte_flow_action_of_push_mpls)),
+ .next = NEXT(action_of_push_mpls),
+ .call = parse_vc,
+ },
+ [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
+ .name = "ethertype",
+ .help = "EtherType",
+ .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_push_mpls,
+ ethertype)),
+ .call = parse_vc_conf,
+ },
};
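As a quick illustration of the OpenFlow-style action tokens added above, a hypothetical rule that strips the outer VLAN tag and queues the traffic (port and queue numbers are made up):

    testpmd> flow create 0 ingress pattern eth / vlan / end actions of_pop_vlan / queue index 0 / end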
/** Remove and return last entry from argument stack. */
@@ -1858,6 +2587,9 @@ parse_vc(struct context *ctx, const struct token *token,
case EGRESS:
out->args.vc.attr.egress = 1;
return len;
+ case TRANSFER:
+ out->args.vc.attr.transfer = 1;
+ return len;
case PATTERN:
out->args.vc.pattern =
(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
@@ -1909,6 +2641,7 @@ parse_vc(struct context *ctx, const struct token *token,
return -1;
*action = (struct rte_flow_action){
.type = priv->type,
+ .conf = data_size ? data : NULL,
};
++out->args.vc.actions_n;
ctx->object = action;
@@ -1989,7 +2722,6 @@ parse_vc_conf(struct context *ctx, const struct token *token,
void *buf, unsigned int size)
{
struct buffer *out = buf;
- struct rte_flow_action *action;
(void)size;
/* Token name must match. */
@@ -1998,14 +2730,147 @@ parse_vc_conf(struct context *ctx, const struct token *token,
/* Nothing else to do if there is no buffer. */
if (!out)
return len;
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ return len;
+}
+
+/** Parse RSS action. */
+static int
+parse_vc_action_rss(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_rss_data *action_rss_data;
+ unsigned int i;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
if (!out->args.vc.actions_n)
return -1;
action = &out->args.vc.actions[out->args.vc.actions_n - 1];
/* Point to selected object. */
ctx->object = out->args.vc.data;
ctx->objmask = NULL;
- /* Update configuration pointer. */
- action->conf = ctx->object;
+ /* Set up default configuration. */
+ action_rss_data = ctx->object;
+ *action_rss_data = (struct action_rss_data){
+ .conf = (struct rte_flow_action_rss){
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = rss_hf,
+ .key_len = sizeof(action_rss_data->key),
+ .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
+ .key = action_rss_data->key,
+ .queue = action_rss_data->queue,
+ },
+ .key = "testpmd's default RSS hash key, "
+ "override it for better balancing",
+ .queue = { 0 },
+ };
+ for (i = 0; i < action_rss_data->conf.queue_num; ++i)
+ action_rss_data->queue[i] = i;
+ if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
+ ctx->port != (portid_t)RTE_PORT_ALL) {
+ struct rte_eth_dev_info info;
+
+ rte_eth_dev_info_get(ctx->port, &info);
+ action_rss_data->conf.key_len =
+ RTE_MIN(sizeof(action_rss_data->key),
+ info.hash_key_size);
+ }
+ action->conf = &action_rss_data->conf;
+ return ret;
+}
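With the defaults initialized here, an RSS rule only has to name what it wants to override; everything else (hash function, key, queue list) keeps the values set above. A sketch with illustrative queue indices:

    testpmd> flow create 0 ingress pattern eth / ipv4 / end actions rss func toeplitz types ipv4-tcp end queues 0 1 2 3 end / end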
+
+/**
+ * Parse func field for RSS action.
+ *
+ * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
+ * ACTION_RSS_FUNC_* index that called this function.
+ */
+static int
+parse_vc_action_rss_func(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct action_rss_data *action_rss_data;
+ enum rte_eth_hash_function func;
+
+ (void)buf;
+ (void)size;
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ switch (ctx->curr) {
+ case ACTION_RSS_FUNC_DEFAULT:
+ func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+ break;
+ case ACTION_RSS_FUNC_TOEPLITZ:
+ func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ break;
+ case ACTION_RSS_FUNC_SIMPLE_XOR:
+ func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ break;
+ default:
+ return -1;
+ }
+ if (!ctx->object)
+ return len;
+ action_rss_data = ctx->object;
+ action_rss_data->conf.func = func;
+ return len;
+}
+
+/**
+ * Parse type field for RSS action.
+ *
+ * Valid tokens are type field names and the "end" token.
+ */
+static int
+parse_vc_action_rss_type(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
+ struct action_rss_data *action_rss_data;
+ unsigned int i;
+
+ (void)token;
+ (void)buf;
+ (void)size;
+ if (ctx->curr != ACTION_RSS_TYPE)
+ return -1;
+ if (!(ctx->objdata >> 16) && ctx->object) {
+ action_rss_data = ctx->object;
+ action_rss_data->conf.types = 0;
+ }
+ if (!strcmp_partial("end", str, len)) {
+ ctx->objdata &= 0xffff;
+ return len;
+ }
+ for (i = 0; rss_type_table[i].str; ++i)
+ if (!strcmp_partial(rss_type_table[i].str, str, len))
+ break;
+ if (!rss_type_table[i].str)
+ return -1;
+ ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
+ /* Repeat token. */
+ if (ctx->next_num == RTE_DIM(ctx->next))
+ return -1;
+ ctx->next[ctx->next_num++] = next;
+ if (!ctx->object)
+ return len;
+ action_rss_data = ctx->object;
+ action_rss_data->conf.types |= rss_type_table[i].rss_type;
return len;
}
@@ -2020,6 +2885,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
void *buf, unsigned int size)
{
static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
+ struct action_rss_data *action_rss_data;
int ret;
int i;
@@ -2031,11 +2897,14 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
i = ctx->objdata >> 16;
if (!strcmp_partial("end", str, len)) {
ctx->objdata &= 0xffff;
- return len;
+ goto end;
}
- if (i >= ACTION_RSS_NUM)
+ if (i >= ACTION_RSS_QUEUE_NUM)
return -1;
- if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
+ if (push_args(ctx,
+ ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
+ i * sizeof(action_rss_data->queue[i]),
+ sizeof(action_rss_data->queue[i]))))
return -1;
ret = parse_int(ctx, token, str, len, NULL, 0);
if (ret < 0) {
@@ -2048,9 +2917,12 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
if (ctx->next_num == RTE_DIM(ctx->next))
return -1;
ctx->next[ctx->next_num++] = next;
+end:
if (!ctx->object)
return len;
- ((struct rte_flow_action_rss *)ctx->object)->num = i;
+ action_rss_data = ctx->object;
+ action_rss_data->conf.queue_num = i;
+ action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
return len;
}
@@ -2269,6 +3141,11 @@ parse_int(struct context *ctx, const struct token *token,
strtoumax(str, &end, 0);
if (errno || (size_t)(end - str) != len)
goto error;
+ if (arg->bounded &&
+ ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
+ (intmax_t)u > (intmax_t)arg->max)) ||
+ (!arg->sign && (u < arg->min || u > arg->max))))
+ goto error;
if (!ctx->object)
return len;
if (arg->mask) {
@@ -2323,8 +3200,8 @@ error:
/**
* Parse a string.
*
- * Two arguments (ctx->args) are retrieved from the stack to store data and
- * its length (in that order).
+ * Three arguments (ctx->args) are retrieved from the stack to store data,
+ * its actual length and address (in that order).
*/
static int
parse_string(struct context *ctx, const struct token *token,
@@ -2333,6 +3210,7 @@ parse_string(struct context *ctx, const struct token *token,
{
const struct arg *arg_data = pop_args(ctx);
const struct arg *arg_len = pop_args(ctx);
+ const struct arg *arg_addr = pop_args(ctx);
char tmp[16]; /* Ought to be enough. */
int ret;
@@ -2343,6 +3221,11 @@ parse_string(struct context *ctx, const struct token *token,
push_args(ctx, arg_data);
return -1;
}
+ if (!arg_addr) {
+ push_args(ctx, arg_len);
+ push_args(ctx, arg_data);
+ return -1;
+ }
size = arg_data->size;
/* Bit-mask fill is not supported. */
if (arg_data->mask || size < len)
@@ -2362,11 +3245,26 @@ parse_string(struct context *ctx, const struct token *token,
buf = (uint8_t *)ctx->object + arg_data->offset;
/* Output buffer is not necessarily NUL-terminated. */
memcpy(buf, str, len);
- memset((uint8_t *)buf + len, 0x55, size - len);
+ memset((uint8_t *)buf + len, 0x00, size - len);
if (ctx->objmask)
memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
+ /* Save address if requested. */
+ if (arg_addr->size) {
+ memcpy((uint8_t *)ctx->object + arg_addr->offset,
+ (void *[]){
+ (uint8_t *)ctx->object + arg_data->offset
+ },
+ arg_addr->size);
+ if (ctx->objmask)
+ memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
+ (void *[]){
+ (uint8_t *)ctx->objmask + arg_data->offset
+ },
+ arg_addr->size);
+ }
return len;
error:
+ push_args(ctx, arg_addr);
push_args(ctx, arg_len);
push_args(ctx, arg_data);
return -1;
@@ -2509,6 +3407,7 @@ static const char *const boolean_name[] = {
"false", "true",
"no", "yes",
"N", "Y",
+ "off", "on",
NULL,
};
@@ -2658,22 +3557,40 @@ comp_rule_id(struct context *ctx, const struct token *token,
return i;
}
+/** Complete type field for RSS action. */
+static int
+comp_vc_action_rss_type(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ unsigned int i;
+
+ (void)ctx;
+ (void)token;
+ for (i = 0; rss_type_table[i].str; ++i)
+ ;
+ if (!buf)
+ return i + 1;
+ if (ent < i)
+ return snprintf(buf, size, "%s", rss_type_table[ent].str);
+ if (ent == i)
+ return snprintf(buf, size, "end");
+ return -1;
+}
+
/** Complete queue field for RSS action. */
static int
comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
unsigned int ent, char *buf, unsigned int size)
{
- static const char *const str[] = { "", "end", NULL };
- unsigned int i;
-
(void)ctx;
(void)token;
- for (i = 0; str[i] != NULL; ++i)
- if (buf && i == ent)
- return snprintf(buf, size, "%s", str[i]);
- if (buf)
- return -1;
- return i;
+ if (!buf)
+ return nb_rxq + 1;
+ if (ent < nb_rxq)
+ return snprintf(buf, size, "%u", ent);
+ if (ent == nb_rxq)
+ return snprintf(buf, size, "end");
+ return -1;
}
/** Internal context. */
@@ -2927,7 +3844,7 @@ cmd_flow_parsed(const struct buffer *in)
break;
case QUERY:
port_flow_query(in->port, in->args.query.rule,
- in->args.query.action);
+ &in->args.query.action);
break;
case LIST:
port_flow_list(in->port, in->args.list.group_n,
diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c
index 35cad543..c904e44f 100644
--- a/app/test-pmd/cmdline_tm.c
+++ b/app/test-pmd/cmdline_tm.c
@@ -1500,7 +1500,7 @@ struct cmd_add_port_tm_nonleaf_node_result {
uint32_t priority;
uint32_t weight;
uint32_t level_id;
- uint32_t shaper_profile_id;
+ int32_t shaper_profile_id;
uint32_t n_sp_priorities;
uint64_t stats_mask;
cmdline_multi_string_t multi_shared_shaper_id;
@@ -1542,7 +1542,7 @@ cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_level_id =
level_id, UINT32);
cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_shaper_profile_id =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
- shaper_profile_id, UINT32);
+ shaper_profile_id, INT32);
cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_n_sp_priorities =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
n_sp_priorities, UINT32);
@@ -1593,7 +1593,11 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
return;
}
- np.shaper_profile_id = res->shaper_profile_id;
+ if (res->shaper_profile_id < 0)
+ np.shaper_profile_id = UINT32_MAX;
+ else
+ np.shaper_profile_id = res->shaper_profile_id;
+
np.n_shared_shapers = n_shared_shapers;
if (np.n_shared_shapers)
np.shared_shaper_id = &shared_shaper_id[0];
@@ -1651,7 +1655,7 @@ struct cmd_add_port_tm_leaf_node_result {
uint32_t priority;
uint32_t weight;
uint32_t level_id;
- uint32_t shaper_profile_id;
+ int32_t shaper_profile_id;
uint32_t cman_mode;
uint32_t wred_profile_id;
uint64_t stats_mask;
@@ -1693,7 +1697,7 @@ cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_level_id =
level_id, UINT32);
cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_shaper_profile_id =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
- shaper_profile_id, UINT32);
+ shaper_profile_id, INT32);
cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_cman_mode =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
cman_mode, UINT32);
@@ -1747,7 +1751,11 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
return;
}
- np.shaper_profile_id = res->shaper_profile_id;
+ if (res->shaper_profile_id < 0)
+ np.shaper_profile_id = UINT32_MAX;
+ else
+ np.shaper_profile_id = res->shaper_profile_id;
+
np.n_shared_shapers = n_shared_shapers;
if (np.n_shared_shapers)
@@ -1958,6 +1966,134 @@ cmdline_parse_inst_t cmd_set_port_tm_node_parent = {
},
};
+/* *** Suspend Port TM Node *** */
+struct cmd_suspend_port_tm_node_result {
+ cmdline_fixed_string_t suspend;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ uint16_t port_id;
+ uint32_t node_id;
+};
+
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_suspend =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, suspend, "suspend");
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, port, "port");
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, tm, "tm");
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, node, "node");
+cmdline_parse_token_num_t cmd_suspend_port_tm_node_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_suspend_port_tm_node_node_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, node_id, UINT32);
+
+static void cmd_suspend_port_tm_node_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_suspend_port_tm_node_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_tm_node_suspend(port_id, node_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_suspend_port_tm_node = {
+ .f = cmd_suspend_port_tm_node_parsed,
+ .data = NULL,
+ .help_str = "Suspend port tm node",
+ .tokens = {
+ (void *)&cmd_suspend_port_tm_node_suspend,
+ (void *)&cmd_suspend_port_tm_node_port,
+ (void *)&cmd_suspend_port_tm_node_tm,
+ (void *)&cmd_suspend_port_tm_node_node,
+ (void *)&cmd_suspend_port_tm_node_port_id,
+ (void *)&cmd_suspend_port_tm_node_node_id,
+ NULL,
+ },
+};
+
+/* *** Resume Port TM Node *** */
+struct cmd_resume_port_tm_node_result {
+ cmdline_fixed_string_t resume;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ uint16_t port_id;
+ uint32_t node_id;
+};
+
+cmdline_parse_token_string_t cmd_resume_port_tm_node_resume =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, resume, "resume");
+cmdline_parse_token_string_t cmd_resume_port_tm_node_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, port, "port");
+cmdline_parse_token_string_t cmd_resume_port_tm_node_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, tm, "tm");
+cmdline_parse_token_string_t cmd_resume_port_tm_node_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, node, "node");
+cmdline_parse_token_num_t cmd_resume_port_tm_node_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_resume_port_tm_node_node_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, node_id, UINT32);
+
+static void cmd_resume_port_tm_node_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_resume_port_tm_node_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_tm_node_resume(port_id, node_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_resume_port_tm_node = {
+ .f = cmd_resume_port_tm_node_parsed,
+ .data = NULL,
+ .help_str = "Resume port tm node",
+ .tokens = {
+ (void *)&cmd_resume_port_tm_node_resume,
+ (void *)&cmd_resume_port_tm_node_port,
+ (void *)&cmd_resume_port_tm_node_tm,
+ (void *)&cmd_resume_port_tm_node_node,
+ (void *)&cmd_resume_port_tm_node_port_id,
+ (void *)&cmd_resume_port_tm_node_node_id,
+ NULL,
+ },
+};
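The two new TM node commands mirror each other; from the testpmd prompt they would look roughly like this (port and node IDs are illustrative, and the node must already exist in the committed hierarchy):

    testpmd> suspend port tm node 0 100
    testpmd> resume port tm node 0 100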
+
/* *** Port TM Hierarchy Commit *** */
struct cmd_port_tm_hierarchy_commit_result {
cmdline_fixed_string_t port;
diff --git a/app/test-pmd/cmdline_tm.h b/app/test-pmd/cmdline_tm.h
index ba303607..b3a14ade 100644
--- a/app/test-pmd/cmdline_tm.h
+++ b/app/test-pmd/cmdline_tm.h
@@ -22,6 +22,8 @@ extern cmdline_parse_inst_t cmd_add_port_tm_nonleaf_node;
extern cmdline_parse_inst_t cmd_add_port_tm_leaf_node;
extern cmdline_parse_inst_t cmd_del_port_tm_node;
extern cmdline_parse_inst_t cmd_set_port_tm_node_parent;
+extern cmdline_parse_inst_t cmd_suspend_port_tm_node;
+extern cmdline_parse_inst_t cmd_resume_port_tm_node;
extern cmdline_parse_inst_t cmd_port_tm_hierarchy_commit;
#endif /* _CMDLINE_TM_H_ */
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 4bb255c6..97020fb3 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -73,12 +73,7 @@ static const struct {
},
};
-struct rss_type_info {
- char str[32];
- uint64_t rss_type;
-};
-
-static const struct rss_type_info rss_type_table[] = {
+const struct rss_type_info rss_type_table[] = {
{ "ipv4", ETH_RSS_IPV4 },
{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
@@ -99,7 +94,12 @@ static const struct rss_type_info rss_type_table[] = {
{ "vxlan", ETH_RSS_VXLAN },
{ "geneve", ETH_RSS_GENEVE },
{ "nvgre", ETH_RSS_NVGRE },
-
+ { "ip", ETH_RSS_IP },
+ { "udp", ETH_RSS_UDP },
+ { "tcp", ETH_RSS_TCP },
+ { "sctp", ETH_RSS_SCTP },
+ { "tunnel", ETH_RSS_TUNNEL },
+ { NULL, 0 },
};
static void
@@ -121,15 +121,11 @@ nic_stats_display(portid_t port_id)
struct rte_eth_stats stats;
struct rte_port *port = &ports[port_id];
uint8_t i;
- portid_t pid;
static const char *nic_stats_border = "########################";
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
rte_eth_stats_get(port_id, &stats);
@@ -203,13 +199,8 @@ nic_stats_display(portid_t port_id)
void
nic_stats_clear(portid_t port_id)
{
- portid_t pid;
-
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
rte_eth_stats_reset(port_id);
@@ -286,15 +277,11 @@ nic_stats_mapping_display(portid_t port_id)
{
struct rte_port *port = &ports[port_id];
uint16_t i;
- portid_t pid;
static const char *nic_stats_mapping_border = "########################";
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
@@ -405,14 +392,11 @@ port_infos_display(portid_t port_id)
int vlan_offload;
struct rte_mempool * mp;
static const char *info_border = "*********************";
- portid_t pid;
uint16_t mtu;
+ char name[RTE_ETH_NAME_MAX_LEN];
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
port = &ports[port_id];
@@ -423,6 +407,8 @@ port_infos_display(portid_t port_id)
info_border, port_id, info_border);
rte_eth_macaddr_get(port_id, &mac_addr);
print_ethaddr("MAC address: ", &mac_addr);
+ rte_eth_dev_get_name_by_port(port_id, name);
+ printf("\nDevice name: %s", name);
printf("\nDriver name: %s", dev_info.driver_name);
printf("\nConnect to socket: %u", port->socket_id);
@@ -517,6 +503,18 @@ port_infos_display(portid_t port_id)
printf("Min possible number of TXDs per queue: %hu\n",
dev_info.tx_desc_lim.nb_min);
printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
+
+ /* Show switch info only if a valid switch domain and port id are set */
+ if (dev_info.switch_info.domain_id !=
+ RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ if (dev_info.switch_info.name)
+ printf("Switch name: %s\n", dev_info.switch_info.name);
+
+ printf("Switch domain Id: %u\n",
+ dev_info.switch_info.domain_id);
+ printf("Switch Port Id: %u\n",
+ dev_info.switch_info.port_id);
+ }
}
void
@@ -722,6 +720,23 @@ port_offload_cap_display(portid_t port_id)
printf("off\n");
}
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
+ printf("IP tunnel TSO: ");
+ if (ports[port_id].dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_IP_TNL_TSO)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
+ printf("UDP tunnel TSO: ");
+ if (ports[port_id].dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_UDP_TNL_TSO)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
}
int
@@ -742,6 +757,17 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning)
return 1;
}
+void print_valid_ports(void)
+{
+ portid_t pid;
+
+ printf("The valid ports array is [");
+ RTE_ETH_FOREACH_DEV(pid) {
+ printf(" %d", pid);
+ }
+ printf(" ]\n");
+}
+
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
@@ -754,6 +780,8 @@ vlan_id_is_invalid(uint16_t vlan_id)
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
uint64_t pci_len;
if (reg_off & 0x3) {
@@ -762,7 +790,21 @@ port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
(unsigned)reg_off);
return 1;
}
- pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
+
+ if (!ports[port_id].dev_info.device) {
+ printf("Invalid device\n");
+ return 0;
+ }
+
+ bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
+ } else {
+ printf("Not a PCI device\n");
+ return 1;
+ }
+
+ pci_len = pci_dev->mem_resource[0].len;
if (reg_off >= pci_len) {
printf("Port %d: register offset %u (0x%X) out of port PCI "
"resource (length=%"PRIu64")\n",
@@ -960,8 +1002,9 @@ static const struct {
MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
MK_FLOW_ITEM(PF, 0),
MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
- MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
- MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
+ MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
+ MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
+ MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
@@ -980,33 +1023,89 @@ static const struct {
MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
+ MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
+ MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
+ MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
+ MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
+ MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
+ MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};
-/** Compute storage space needed by item specification. */
-static void
-flow_item_spec_size(const struct rte_flow_item *item,
- size_t *size, size_t *pad)
+/** Pattern item specification types. */
+enum item_spec_type {
+ ITEM_SPEC,
+ ITEM_LAST,
+ ITEM_MASK,
+};
+
+/** Compute storage space needed by item specification and copy it. */
+static size_t
+flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
+ enum item_spec_type type)
{
- if (!item->spec) {
- *size = 0;
+ size_t size = 0;
+ const void *data =
+ type == ITEM_SPEC ? item->spec :
+ type == ITEM_LAST ? item->last :
+ type == ITEM_MASK ? item->mask :
+ NULL;
+
+ if (!item->spec || !data)
goto empty;
- }
switch (item->type) {
union {
const struct rte_flow_item_raw *raw;
} spec;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } last;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } mask;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } src;
+ union {
+ struct rte_flow_item_raw *raw;
+ } dst;
+ size_t off;
case RTE_FLOW_ITEM_TYPE_RAW:
spec.raw = item->spec;
- *size = offsetof(struct rte_flow_item_raw, pattern) +
- spec.raw->length * sizeof(*spec.raw->pattern);
+ last.raw = item->last ? item->last : item->spec;
+ mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
+ src.raw = data;
+ dst.raw = buf;
+ off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
+ sizeof(*src.raw->pattern));
+ if (type == ITEM_SPEC ||
+ (type == ITEM_MASK &&
+ ((spec.raw->length & mask.raw->length) >=
+ (last.raw->length & mask.raw->length))))
+ size = spec.raw->length & mask.raw->length;
+ else
+ size = last.raw->length & mask.raw->length;
+ size = off + size * sizeof(*src.raw->pattern);
+ if (dst.raw) {
+ memcpy(dst.raw, src.raw, sizeof(*src.raw));
+ dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
+ src.raw->pattern,
+ size - off);
+ }
break;
default:
- *size = flow_item[item->type].size;
+ size = flow_item[item->type].size;
+ if (buf)
+ memcpy(buf, data, size);
break;
}
empty:
- *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+ return RTE_ALIGN_CEIL(size, sizeof(double));
}
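As a worked example of the RAW item sizing above (illustrative values): for a spec with length == 8, no last, and the default rte_flow_item_raw_mask (all-ones length), the masked length is 8, so the copy is laid out as the aligned struct rte_flow_item_raw followed by 8 pattern elements, and the function returns that total rounded up to a multiple of sizeof(double).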
/** Generate flow_action[] entry. */
@@ -1028,39 +1127,92 @@ static const struct {
MK_FLOW_ACTION(FLAG, 0),
MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
MK_FLOW_ACTION(DROP, 0),
- MK_FLOW_ACTION(COUNT, 0),
- MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
- MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
+ MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
+ MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
MK_FLOW_ACTION(PF, 0),
MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
+ MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
+ MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
+ MK_FLOW_ACTION(OF_SET_MPLS_TTL,
+ sizeof(struct rte_flow_action_of_set_mpls_ttl)),
+ MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
+ MK_FLOW_ACTION(OF_SET_NW_TTL,
+ sizeof(struct rte_flow_action_of_set_nw_ttl)),
+ MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
+ MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
+ MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
+ MK_FLOW_ACTION(OF_POP_VLAN, 0),
+ MK_FLOW_ACTION(OF_PUSH_VLAN,
+ sizeof(struct rte_flow_action_of_push_vlan)),
+ MK_FLOW_ACTION(OF_SET_VLAN_VID,
+ sizeof(struct rte_flow_action_of_set_vlan_vid)),
+ MK_FLOW_ACTION(OF_SET_VLAN_PCP,
+ sizeof(struct rte_flow_action_of_set_vlan_pcp)),
+ MK_FLOW_ACTION(OF_POP_MPLS,
+ sizeof(struct rte_flow_action_of_pop_mpls)),
+ MK_FLOW_ACTION(OF_PUSH_MPLS,
+ sizeof(struct rte_flow_action_of_push_mpls)),
};
-/** Compute storage space needed by action configuration. */
-static void
-flow_action_conf_size(const struct rte_flow_action *action,
- size_t *size, size_t *pad)
+/** Compute storage space needed by action configuration and copy it. */
+static size_t
+flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
- if (!action->conf) {
- *size = 0;
+ size_t size = 0;
+
+ if (!action->conf)
goto empty;
- }
switch (action->type) {
union {
const struct rte_flow_action_rss *rss;
- } conf;
+ } src;
+ union {
+ struct rte_flow_action_rss *rss;
+ } dst;
+ size_t off;
case RTE_FLOW_ACTION_TYPE_RSS:
- conf.rss = action->conf;
- *size = offsetof(struct rte_flow_action_rss, queue) +
- conf.rss->num * sizeof(*conf.rss->queue);
+ src.rss = action->conf;
+ dst.rss = buf;
+ off = 0;
+ if (dst.rss)
+ *dst.rss = (struct rte_flow_action_rss){
+ .func = src.rss->func,
+ .level = src.rss->level,
+ .types = src.rss->types,
+ .key_len = src.rss->key_len,
+ .queue_num = src.rss->queue_num,
+ };
+ off += sizeof(*src.rss);
+ if (src.rss->key_len) {
+ off = RTE_ALIGN_CEIL(off, sizeof(double));
+ size = sizeof(*src.rss->key) * src.rss->key_len;
+ if (dst.rss)
+ dst.rss->key = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->key, size);
+ off += size;
+ }
+ if (src.rss->queue_num) {
+ off = RTE_ALIGN_CEIL(off, sizeof(double));
+ size = sizeof(*src.rss->queue) * src.rss->queue_num;
+ if (dst.rss)
+ dst.rss->queue = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->queue, size);
+ off += size;
+ }
+ size = off;
break;
default:
- *size = flow_action[action->type].size;
+ size = flow_action[action->type].size;
+ if (buf)
+ memcpy(buf, action->conf, size);
break;
}
empty:
- *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+ return RTE_ALIGN_CEIL(size, sizeof(double));
}
/** Generate a port_flow entry from attributes/pattern/actions. */
@@ -1073,7 +1225,6 @@ port_flow_new(const struct rte_flow_attr *attr,
const struct rte_flow_action *action;
struct port_flow *pf = NULL;
size_t tmp;
- size_t pad;
size_t off1 = 0;
size_t off2 = 0;
int err = ENOTSUP;
@@ -1091,24 +1242,23 @@ store:
if (pf)
dst = memcpy(pf->data + off1, item, sizeof(*item));
off1 += sizeof(*item);
- flow_item_spec_size(item, &tmp, &pad);
if (item->spec) {
if (pf)
- dst->spec = memcpy(pf->data + off2,
- item->spec, tmp);
- off2 += tmp + pad;
+ dst->spec = pf->data + off2;
+ off2 += flow_item_spec_copy
+ (pf ? pf->data + off2 : NULL, item, ITEM_SPEC);
}
if (item->last) {
if (pf)
- dst->last = memcpy(pf->data + off2,
- item->last, tmp);
- off2 += tmp + pad;
+ dst->last = pf->data + off2;
+ off2 += flow_item_spec_copy
+ (pf ? pf->data + off2 : NULL, item, ITEM_LAST);
}
if (item->mask) {
if (pf)
- dst->mask = memcpy(pf->data + off2,
- item->mask, tmp);
- off2 += tmp + pad;
+ dst->mask = pf->data + off2;
+ off2 += flow_item_spec_copy
+ (pf ? pf->data + off2 : NULL, item, ITEM_MASK);
}
off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
@@ -1125,12 +1275,11 @@ store:
if (pf)
dst = memcpy(pf->data + off1, action, sizeof(*action));
off1 += sizeof(*action);
- flow_action_conf_size(action, &tmp, &pad);
if (action->conf) {
if (pf)
- dst->conf = memcpy(pf->data + off2,
- action->conf, tmp);
- off2 += tmp + pad;
+ dst->conf = pf->data + off2;
+ off2 += flow_action_conf_copy
+ (pf ? pf->data + off2 : NULL, action);
}
off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
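To make the two-pass pattern used by these hunks explicit, a minimal sketch (assumed shape, not code from the patch) of how the new copy helpers are driven: with a NULL destination they only return the aligned size, with a real buffer they also perform the copy:

	size_t len;
	void *buf;

	len = flow_item_spec_copy(NULL, item, ITEM_SPEC); /* sizing pass */
	buf = malloc(len);
	if (buf != NULL)
		flow_item_spec_copy(buf, item, ITEM_SPEC); /* copying pass */

port_flow_new() does the same with a single loop executed twice via the store label, accumulating off1/off2 before allocating pf.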
@@ -1168,10 +1317,15 @@ port_flow_complain(struct rte_flow_error *error)
[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
+ [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
+ [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
+ [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
+ [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
};
const char *errstr;
@@ -1326,7 +1480,7 @@ port_flow_flush(portid_t port_id)
/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
- enum rte_flow_action_type action)
+ const struct rte_flow_action *action)
{
struct rte_flow_error error;
struct rte_port *port;
@@ -1347,16 +1501,17 @@ port_flow_query(portid_t port_id, uint32_t rule,
printf("Flow rule #%u not found\n", rule);
return -ENOENT;
}
- if ((unsigned int)action >= RTE_DIM(flow_action) ||
- !flow_action[action].name)
+ if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
+ !flow_action[action->type].name)
name = "unknown";
else
- name = flow_action[action].name;
- switch (action) {
+ name = flow_action[action->type].name;
+ switch (action->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
break;
default:
- printf("Cannot query action type %d (%s)\n", action, name);
+ printf("Cannot query action type %d (%s)\n",
+ action->type, name);
return -ENOTSUP;
}
/* Poisoning to make sure PMDs update it in case of error. */
@@ -1364,7 +1519,7 @@ port_flow_query(portid_t port_id, uint32_t rule,
memset(&query, 0, sizeof(query));
if (rte_flow_query(port_id, pf->flow, action, &query, &error))
return port_flow_complain(&error);
- switch (action) {
+ switch (action->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
printf("%s:\n"
" hits_set: %u\n"
@@ -1379,7 +1534,7 @@ port_flow_query(portid_t port_id, uint32_t rule,
break;
default:
printf("Cannot display result for action type %d (%s)\n",
- action, name);
+ action->type, name);
break;
}
return 0;
@@ -1429,12 +1584,13 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
const struct rte_flow_item *item = pf->pattern;
const struct rte_flow_action *action = pf->actions;
- printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t",
+ printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
pf->id,
pf->attr.group,
pf->attr.priority,
pf->attr.ingress ? 'i' : '-',
- pf->attr.egress ? 'e' : '-');
+ pf->attr.egress ? 'e' : '-',
+ pf->attr.transfer ? 't' : '-');
while (item->type != RTE_FLOW_ITEM_TYPE_END) {
if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
printf("%s ", flow_item[item->type].name);
@@ -1664,6 +1820,7 @@ void
rxtx_config_display(void)
{
portid_t pid;
+ queueid_t qid;
printf(" %s packet forwarding%s packets/burst=%d\n",
cur_fwd_eng->fwd_mode_name,
@@ -1678,30 +1835,63 @@ rxtx_config_display(void)
nb_fwd_lcores, nb_fwd_ports);
RTE_ETH_FOREACH_DEV(pid) {
- struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf;
- struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf;
-
- printf(" port %d:\n", (unsigned int)pid);
- printf(" CRC stripping %s\n",
- (ports[pid].dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ?
- "enabled" : "disabled");
- printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n",
- nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
- printf(" RX threshold registers: pthresh=%d hthresh=%d "
- " wthresh=%d\n",
- rx_conf->rx_thresh.pthresh,
- rx_conf->rx_thresh.hthresh,
- rx_conf->rx_thresh.wthresh);
- printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n",
- nb_txq, nb_txd, tx_conf->tx_free_thresh);
- printf(" TX threshold registers: pthresh=%d hthresh=%d "
- " wthresh=%d\n",
- tx_conf->tx_thresh.pthresh,
- tx_conf->tx_thresh.hthresh,
- tx_conf->tx_thresh.wthresh);
- printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n",
- tx_conf->tx_rs_thresh, tx_conf->offloads);
+ struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
+ struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
+ uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
+ uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
+ uint16_t nb_rx_desc_tmp;
+ uint16_t nb_tx_desc_tmp;
+ struct rte_eth_rxq_info rx_qinfo;
+ struct rte_eth_txq_info tx_qinfo;
+ int32_t rc;
+
+ /* per port config */
+ printf(" port %d: RX queue number: %d Tx queue number: %d\n",
+ (unsigned int)pid, nb_rxq, nb_txq);
+
+ printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
+ ports[pid].dev_conf.rxmode.offloads,
+ ports[pid].dev_conf.txmode.offloads);
+
+ /* per rx queue config only for first queue to be less verbose */
+ for (qid = 0; qid < 1; qid++) {
+ rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
+ if (rc)
+ nb_rx_desc_tmp = nb_rx_desc[qid];
+ else
+ nb_rx_desc_tmp = rx_qinfo.nb_desc;
+
+ printf(" RX queue: %d\n", qid);
+ printf(" RX desc=%d - RX free threshold=%d\n",
+ nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
+ printf(" RX threshold registers: pthresh=%d hthresh=%d "
+ " wthresh=%d\n",
+ rx_conf[qid].rx_thresh.pthresh,
+ rx_conf[qid].rx_thresh.hthresh,
+ rx_conf[qid].rx_thresh.wthresh);
+ printf(" RX Offloads=0x%"PRIx64"\n",
+ rx_conf[qid].offloads);
+ }
+
+ /* per tx queue config only for first queue to be less verbose */
+ for (qid = 0; qid < 1; qid++) {
+ rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
+ if (rc)
+ nb_tx_desc_tmp = nb_tx_desc[qid];
+ else
+ nb_tx_desc_tmp = tx_qinfo.nb_desc;
+
+ printf(" TX queue: %d\n", qid);
+ printf(" TX desc=%d - TX free threshold=%d\n",
+ nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
+ printf(" TX threshold registers: pthresh=%d hthresh=%d "
+ " wthresh=%d\n",
+ tx_conf[qid].tx_thresh.pthresh,
+ tx_conf[qid].tx_thresh.hthresh,
+ tx_conf[qid].tx_thresh.wthresh);
+ printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
+ tx_conf[qid].offloads, tx_conf[qid].tx_rs_thresh);
+ }
}
}
@@ -1761,7 +1951,7 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
}
rss_conf.rss_hf = 0;
- for (i = 0; i < RTE_DIM(rss_type_table); i++) {
+ for (i = 0; rss_type_table[i].str; i++) {
if (!strcmp(rss_info, rss_type_table[i].str))
rss_conf.rss_hf = rss_type_table[i].rss_type;
}
@@ -1790,7 +1980,7 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
return;
}
printf("RSS functions:\n ");
- for (i = 0; i < RTE_DIM(rss_type_table); i++) {
+ for (i = 0; rss_type_table[i].str; i++) {
if (rss_hf & rss_type_table[i].rss_type)
printf("%s ", rss_type_table[i].str);
}
@@ -1814,7 +2004,7 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
rss_conf.rss_key = NULL;
rss_conf.rss_key_len = hash_key_len;
rss_conf.rss_hf = 0;
- for (i = 0; i < RTE_DIM(rss_type_table); i++) {
+ for (i = 0; rss_type_table[i].str; i++) {
if (!strcmp(rss_type_table[i].str, rss_type))
rss_conf.rss_hf = rss_type_table[i].rss_type;
}
@@ -3018,6 +3208,7 @@ flowtype_to_str(uint16_t flow_type)
{"vxlan", RTE_ETH_FLOW_VXLAN},
{"geneve", RTE_ETH_FLOW_GENEVE},
{"nvgre", RTE_ETH_FLOW_NVGRE},
+ {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
};
for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 5f5ab64a..0bb88cf7 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -49,9 +49,12 @@
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
-#define GRE_KEY_PRESENT 0x2000
-#define GRE_KEY_LEN 4
-#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT
+#define GRE_CHECKSUM_PRESENT 0x8000
+#define GRE_KEY_PRESENT 0x2000
+#define GRE_SEQUENCE_PRESENT 0x1000
+#define GRE_EXT_LEN 4
+#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+ GRE_SEQUENCE_PRESENT)
/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -60,6 +63,8 @@
#define _htons(x) (x)
#endif
+uint16_t vxlan_gpe_udp_port = 4790;
+
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
uint16_t ethertype;
@@ -194,6 +199,70 @@ parse_vxlan(struct udp_hdr *udp_hdr,
info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
+/* Parse a vxlan-gpe header */
+static void
+parse_vxlan_gpe(struct udp_hdr *udp_hdr,
+ struct testpmd_offload_info *info)
+{
+ struct ether_hdr *eth_hdr;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct vxlan_gpe_hdr *vxlan_gpe_hdr;
+ uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
+
+ /* Check udp destination port. */
+ if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
+ return;
+
+ vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)((char *)udp_hdr +
+ sizeof(struct udp_hdr));
+
+ if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
+ VXLAN_GPE_TYPE_IPV4) {
+ info->is_tunnel = 1;
+ info->outer_ethertype = info->ethertype;
+ info->outer_l2_len = info->l2_len;
+ info->outer_l3_len = info->l3_len;
+ info->outer_l4_proto = info->l4_proto;
+
+ ipv4_hdr = (struct ipv4_hdr *)((char *)vxlan_gpe_hdr +
+ vxlan_gpe_len);
+
+ parse_ipv4(ipv4_hdr, info);
+ info->ethertype = _htons(ETHER_TYPE_IPv4);
+ info->l2_len = 0;
+
+ } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) {
+ info->is_tunnel = 1;
+ info->outer_ethertype = info->ethertype;
+ info->outer_l2_len = info->l2_len;
+ info->outer_l3_len = info->l3_len;
+ info->outer_l4_proto = info->l4_proto;
+
+ ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
+ vxlan_gpe_len);
+
+ info->ethertype = _htons(ETHER_TYPE_IPv6);
+ parse_ipv6(ipv6_hdr, info);
+ info->l2_len = 0;
+
+ } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) {
+ info->is_tunnel = 1;
+ info->outer_ethertype = info->ethertype;
+ info->outer_l2_len = info->l2_len;
+ info->outer_l3_len = info->l3_len;
+ info->outer_l4_proto = info->l4_proto;
+
+ eth_hdr = (struct ether_hdr *)((char *)vxlan_gpe_hdr +
+ vxlan_gpe_len);
+
+ parse_ethernet(eth_hdr, info);
+ } else
+ return;
+
+ info->l2_len += ETHER_VXLAN_GPE_HLEN;
+}
+
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
@@ -203,14 +272,14 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
struct ipv6_hdr *ipv6_hdr;
uint8_t gre_len = 0;
- /* check which fields are supported */
- if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
- return;
-
gre_len += sizeof(struct simple_gre_hdr);
if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
- gre_len += GRE_KEY_LEN;
+ gre_len += GRE_EXT_LEN;
+ if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
+ gre_len += GRE_EXT_LEN;
+ if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
+ gre_len += GRE_EXT_LEN;
if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
info->is_tunnel = 1;
@@ -588,6 +657,10 @@ pkt_copy_split(const struct rte_mbuf *pkt)
* Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
* Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
* UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
+ * UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
+ * UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
@@ -664,7 +737,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
m = pkts_burst[i];
info.is_tunnel = 0;
info.pkt_len = rte_pktmbuf_pkt_len(m);
- tx_ol_flags = 0;
+ tx_ol_flags = m->ol_flags &
+ (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF);
rx_ol_flags = m->ol_flags;
/* Update the L3/L4 checksum error packet statistics */
@@ -691,9 +765,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
info.l3_len);
- parse_vxlan(udp_hdr, &info, m->packet_type);
- if (info.is_tunnel)
- tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
+ parse_vxlan_gpe(udp_hdr, &info);
+ if (info.is_tunnel) {
+ tx_ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE;
+ } else {
+ parse_vxlan(udp_hdr, &info,
+ m->packet_type);
+ if (info.is_tunnel)
+ tx_ol_flags |=
+ PKT_TX_TUNNEL_VXLAN;
+ }
} else if (info.l4_proto == IPPROTO_GRE) {
struct simple_gre_hdr *gre_hdr;
@@ -738,6 +819,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
/* step 3: fill the mbuf meta data (flags and header lengths) */
+ m->tx_offload = 0;
if (info.is_tunnel == 1) {
if (info.tunnel_tso_segsz ||
(tx_offloads &
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 2adce701..7cac757a 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -96,7 +96,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
&eth_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->s_addr);
- mb->ol_flags = ol_flags;
+ mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct ether_hdr);
mb->l3_len = sizeof(struct ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index e2cc4812..a8384d5b 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -127,7 +127,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
ether_addr_copy(&addr, &eth_hdr->s_addr);
- mb->ol_flags = ol_flags;
+ mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct ether_hdr);
mb->l3_len = sizeof(struct ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index 7ed74db2..a51514b0 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+# override default name to drop the hyphen
+name = 'testpmd'
+allow_experimental_apis = true
sources = files('cmdline.c',
'cmdline_flow.c',
'cmdline_mtr.c',
@@ -32,22 +35,10 @@ if dpdk_conf.has('RTE_LIBRTE_SOFTNIC_PMD')
sources += files('tm.c')
deps += 'pmd_softnic'
endif
-
-dep_objs = []
-foreach d:deps
- dep_objs += get_variable(get_option('default_library') + '_rte_' + d)
-endforeach
-dep_objs += cc.find_library('execinfo', required: false) # for BSD only
-
-link_libs = []
-if get_option('default_library') == 'static'
- link_libs = dpdk_drivers
+if dpdk_conf.has('RTE_LIBRTE_DPAA_PMD')
+ deps += ['bus_dpaa', 'mempool_dpaa', 'pmd_dpaa']
+endif
+if dpdk_conf.has('RTE_LIBRTE_BPF')
+ sources += files('bpf_cmd.c')
+ deps += 'bpf'
endif
-
-executable('dpdk-testpmd',
- sources,
- c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'],
- link_whole: link_libs,
- dependencies: dep_objs,
- install_rpath: join_paths(get_option('prefix'), driver_install_path),
- install: true)
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 97d22b86..75807623 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -70,7 +70,7 @@ usage(char* progname)
"--rss-ip | --rss-udp | "
"--rxpt= | --rxht= | --rxwt= | --rxfreet= | "
"--txpt= | --txht= | --txwt= | --txfreet= | "
- "--txrst= | --tx-offloads ]\n",
+ "--txrst= | --tx-offloads= | --vxlan-gpe-port= ]\n",
progname);
#ifdef RTE_LIBRTE_CMDLINE
printf(" --interactive: run in interactive mode.\n");
@@ -186,6 +186,10 @@ usage(char* progname)
printf(" --flow-isolate-all: "
"requests flow API isolated mode on all ports at initialization time.\n");
printf(" --tx-offloads=0xXXXXXXXX: hexadecimal bitmask of TX queue offloads\n");
+ printf(" --hot-plug: enable hot plug for device.\n");
+ printf(" --vxlan-gpe-port=N: UPD port of tunnel VXLAN-GPE\n");
+ printf(" --mlockall: lock all memory\n");
+ printf(" --no-mlockall: do not lock all memory\n");
}
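For illustration, an invocation exercising the options documented above (core list, EAL arguments and the port value are placeholders):

	./testpmd -l 0-3 -n 4 -- -i --vxlan-gpe-port=4790 --hot-plug --mlockall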
#ifdef RTE_LIBRTE_CMDLINE
@@ -373,7 +377,6 @@ parse_portnuma_config(const char *q_arg)
};
unsigned long int_fld[_NUM_FLD];
char *str_fld[_NUM_FLD];
- portid_t pid;
/* reset from value set at definition */
while ((p = strchr(p0,'(')) != NULL) {
@@ -397,10 +400,7 @@ parse_portnuma_config(const char *q_arg)
port_id = (portid_t)int_fld[FLD_PORT];
if (port_id_is_invalid(port_id, ENABLED_WARN) ||
port_id == (portid_t)RTE_PORT_ALL) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
@@ -431,7 +431,6 @@ parse_ringnuma_config(const char *q_arg)
};
unsigned long int_fld[_NUM_FLD];
char *str_fld[_NUM_FLD];
- portid_t pid;
#define RX_RING_ONLY 0x1
#define TX_RING_ONLY 0x2
#define RXTX_RING 0x3
@@ -458,10 +457,7 @@ parse_ringnuma_config(const char *q_arg)
port_id = (portid_t)int_fld[FLD_PORT];
if (port_id_is_invalid(port_id, ENABLED_WARN) ||
port_id == (portid_t)RTE_PORT_ALL) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
@@ -512,6 +508,8 @@ parse_event_printing_config(const char *optarg, int enable)
mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET;
else if (!strcmp(optarg, "vf_mbox"))
mask = UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
+ else if (!strcmp(optarg, "ipsec"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_IPSEC;
else if (!strcmp(optarg, "macsec"))
mask = UINT32_C(1) << RTE_ETH_EVENT_MACSEC;
else if (!strcmp(optarg, "intr_rmv"))
@@ -544,6 +542,8 @@ launch_args_parse(int argc, char** argv)
/* Default offloads for all ports. */
uint64_t rx_offloads = rx_mode.offloads;
uint64_t tx_offloads = tx_mode.offloads;
+ struct rte_eth_dev_info dev_info;
+ uint16_t rec_nb_pkts;
static struct option lgopts[] = {
{ "help", 0, 0, 0 },
@@ -621,6 +621,10 @@ launch_args_parse(int argc, char** argv)
{ "print-event", 1, 0, 0 },
{ "mask-event", 1, 0, 0 },
{ "tx-offloads", 1, 0, 0 },
+ { "hot-plug", 0, 0, 0 },
+ { "vxlan-gpe-port", 1, 0, 0 },
+ { "mlockall", 0, 0, 0 },
+ { "no-mlockall", 0, 0, 0 },
{ 0, 0, 0, 0 },
};
@@ -658,9 +662,8 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "cmdline-file")) {
printf("CLI commands to be read from %s\n",
optarg);
- snprintf(cmdline_filename,
- sizeof(cmdline_filename), "%s",
- optarg);
+ strlcpy(cmdline_filename, optarg,
+ sizeof(cmdline_filename));
}
if (!strcmp(lgopts[opt_idx].name, "auto-start")) {
printf("Auto-start selected\n");
@@ -948,12 +951,38 @@ launch_args_parse(int argc, char** argv)
}
if (!strcmp(lgopts[opt_idx].name, "burst")) {
n = atoi(optarg);
- if ((n >= 1) && (n <= MAX_PKT_BURST))
- nb_pkt_per_burst = (uint16_t) n;
- else
+ if (n == 0) {
+ /* A burst size of zero means that the
+ * PMD should be queried for the
+ * recommended Rx burst size. Since
+ * testpmd uses a single size for all
+ * ports, port 0 is queried for the
+ * value, on the assumption that all
+ * ports are of the same NIC model.
+ */
+ rte_eth_dev_info_get(0, &dev_info);
+ rec_nb_pkts = dev_info
+ .default_rxportconf.burst_size;
+
+ if (rec_nb_pkts == 0)
+ rte_exit(EXIT_FAILURE,
+ "PMD does not recommend a burst size. "
+ "Provided value must be between "
+ "1 and %d\n", MAX_PKT_BURST);
+ else if (rec_nb_pkts > MAX_PKT_BURST)
+ rte_exit(EXIT_FAILURE,
+ "PMD recommended burst size of %d"
+ " exceeds maximum value of %d\n",
+ rec_nb_pkts, MAX_PKT_BURST);
+ printf("Using PMD-provided burst value of %d\n",
+ rec_nb_pkts);
+ nb_pkt_per_burst = rec_nb_pkts;
+ } else if (n > MAX_PKT_BURST)
rte_exit(EXIT_FAILURE,
- "burst must >= 1 and <= %d]",
- MAX_PKT_BURST);
+ "burst must be between1 and %d\n",
+ MAX_PKT_BURST);
+ else
+ nb_pkt_per_burst = (uint16_t) n;
}
if (!strcmp(lgopts[opt_idx].name, "mbcache")) {
n = atoi(optarg);
@@ -1092,6 +1121,14 @@ launch_args_parse(int argc, char** argv)
rte_exit(EXIT_FAILURE,
"tx-offloads must be >= 0\n");
}
+ if (!strcmp(lgopts[opt_idx].name, "vxlan-gpe-port")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ vxlan_gpe_udp_port = (uint16_t)n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "vxlan-gpe-port must be >= 0\n");
+ }
if (!strcmp(lgopts[opt_idx].name, "print-event"))
if (parse_event_printing_config(optarg, 1)) {
rte_exit(EXIT_FAILURE,
@@ -1102,7 +1139,12 @@ launch_args_parse(int argc, char** argv)
rte_exit(EXIT_FAILURE,
"invalid mask-event argument\n");
}
-
+ if (!strcmp(lgopts[opt_idx].name, "hot-plug"))
+ hot_plug = 1;
+ if (!strcmp(lgopts[opt_idx].name, "mlockall"))
+ do_mlockall = 1;
+ if (!strcmp(lgopts[opt_idx].name, "no-mlockall"))
+ do_mlockall = 0;
break;
case 'h':
usage(argv[0]);
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 4c0e2586..35cf2667 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -12,6 +12,7 @@
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
+#include <stdbool.h>
#include <sys/queue.h>
#include <sys/stat.h>
@@ -210,9 +211,10 @@ queueid_t nb_txq = 1; /**< Number of TX queues per port. */
/*
* Configurable number of RX/TX ring descriptors.
+ * Defaults are supplied by drivers via ethdev.
*/
-#define RTE_TEST_RX_DESC_DEFAULT 1024
-#define RTE_TEST_TX_DESC_DEFAULT 1024
+#define RTE_TEST_RX_DESC_DEFAULT 0
+#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
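With both defaults now zero, the ring size is resolved at queue-setup time; a minimal sketch of the expected fallback (field names from struct rte_eth_dev_info; the final constant is an assumption, not taken from the patch):

	rte_eth_dev_info_get(port_id, &dev_info);
	ring_size = nb_rxd ? nb_rxd : dev_info.default_rxportconf.ring_size;
	if (ring_size == 0)
		ring_size = 512; /* assumed ethdev fallback when the PMD gives no hint */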
@@ -284,6 +286,8 @@ uint8_t lsc_interrupt = 1; /* enabled by default */
*/
uint8_t rmv_interrupt = 1; /* enabled by default */
+uint8_t hot_plug = 0; /**< hotplug disabled by default. */
+
/*
* Display or mask ether events
* Default to all events except VF_MBOX
@@ -292,8 +296,13 @@ uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
(UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
(UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
+ (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
(UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
(UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
+/*
+ * Decide whether all memory is locked for performance.
+ */
+int do_mlockall = 0;
/*
* NIC bypass mode configuration options.
@@ -391,6 +400,12 @@ static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
enum rte_eth_event_type type,
void *param, void *ret_param);
+static void eth_dev_event_callback(char *device_name,
+ enum rte_dev_event_type type,
+ void *param);
+static int eth_dev_event_callback_register(void);
+static int eth_dev_event_callback_unregister(void);
+
/*
* Check if all the ports are started.
@@ -656,6 +671,7 @@ init_config(void)
uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
struct rte_gro_param gro_param;
uint32_t gso_types;
+ int k;
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
@@ -690,6 +706,11 @@ init_config(void)
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
rte_eth_dev_info_get(pid, &port->dev_info);
+
+ if (!(port->dev_info.rx_offload_capa &
+ DEV_RX_OFFLOAD_CRC_STRIP))
+ port->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_CRC_STRIP;
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
@@ -707,6 +728,15 @@ init_config(void)
}
}
+ /* Apply Rx offloads configuration */
+ for (k = 0; k < port->dev_info.max_rx_queues; k++)
+ port->rx_conf[k].offloads =
+ port->dev_conf.rxmode.offloads;
+ /* Apply Tx offloads configuration */
+ for (k = 0; k < port->dev_info.max_tx_queues; k++)
+ port->tx_conf[k].offloads =
+ port->dev_conf.txmode.offloads;
+
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
@@ -871,18 +901,23 @@ init_fwd_streams(void)
/* init new */
nb_fwd_streams = nb_fwd_streams_new;
- fwd_streams = rte_zmalloc("testpmd: fwd_streams",
- sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
- if (fwd_streams == NULL)
- rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
- "failed\n", nb_fwd_streams);
+ if (nb_fwd_streams) {
+ fwd_streams = rte_zmalloc("testpmd: fwd_streams",
+ sizeof(struct fwd_stream *) * nb_fwd_streams,
+ RTE_CACHE_LINE_SIZE);
+ if (fwd_streams == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
+ " (struct fwd_stream *)) failed\n",
+ nb_fwd_streams);
- for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
- fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
- sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
- if (fwd_streams[sm_id] == NULL)
- rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
- " failed\n");
+ for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
+ fwd_streams[sm_id] = rte_zmalloc("testpmd:"
+ " struct fwd_stream", sizeof(struct fwd_stream),
+ RTE_CACHE_LINE_SIZE);
+ if (fwd_streams[sm_id] == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc"
+ "(struct fwd_stream) failed\n");
+ }
}
return 0;
@@ -916,6 +951,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
pktnb_stats[1] = pktnb_stats[0];
burst_stats[0] = nb_burst;
pktnb_stats[0] = nb_pkt;
+ } else if (nb_burst > burst_stats[1]) {
+ burst_stats[1] = nb_burst;
+ pktnb_stats[1] = nb_pkt;
}
}
if (total_burst == 0)
@@ -1110,9 +1148,8 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
uint64_t tics_per_1sec;
uint64_t tics_datum;
uint64_t tics_current;
- uint8_t idx_port, cnt_ports;
+ uint16_t idx_port;
- cnt_ports = rte_eth_dev_count();
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
@@ -1127,9 +1164,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_current = rte_rdtsc();
if (tics_current - tics_datum >= tics_per_1sec) {
/* Periodic bitrate calculation */
- for (idx_port = 0;
- idx_port < cnt_ports;
- idx_port++)
+ RTE_ETH_FOREACH_DEV(idx_port)
rte_stats_bitrate_calc(bitrate_data,
idx_port);
tics_datum = tics_current;
@@ -1202,6 +1237,31 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
}
/*
+ * Update the forward ports list.
+ */
+void
+update_fwd_ports(portid_t new_pid)
+{
+ unsigned int i;
+ unsigned int new_nb_fwd_ports = 0;
+ int move = 0;
+
+ for (i = 0; i < nb_fwd_ports; ++i) {
+ if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
+ move = 1;
+ else if (move)
+ fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
+ else
+ new_nb_fwd_ports++;
+ }
+ if (new_pid < RTE_MAX_ETHPORTS)
+ fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
+
+ nb_fwd_ports = new_nb_fwd_ports;
+ nb_cfg_ports = new_nb_fwd_ports;
+}
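For example (illustrative ids): with fwd_ports_ids = {0, 1, 2} and port 1 just detached, update_fwd_ports(RTE_MAX_ETHPORTS) compacts the array to {0, 2} and sets nb_fwd_ports = nb_cfg_ports = 2, while attach_port() passes the newly probed port id so that it is appended instead.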
+
+/*
* Launch packet forwarding configuration.
*/
void
@@ -1236,10 +1296,6 @@ start_packet_forwarding(int with_tx_first)
return;
}
- if (init_fwd_streams() < 0) {
- printf("Fail from init_fwd_streams()\n");
- return;
- }
if(dcb_test) {
for (i = 0; i < nb_fwd_ports; i++) {
@@ -1259,10 +1315,11 @@ start_packet_forwarding(int with_tx_first)
}
test_done = 0;
+ fwd_config_setup();
+
if(!no_flush_rx)
flush_fwd_rx_queues();
- fwd_config_setup();
pkt_fwd_config_display(&cur_fwd_config);
rxtx_config_display();
@@ -1586,20 +1643,21 @@ start_port(portid_t pid)
}
if (port->need_reconfig_queues > 0) {
port->need_reconfig_queues = 0;
- port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
- /* Apply Tx offloads configuration */
- port->tx_conf.offloads = port->dev_conf.txmode.offloads;
/* setup tx queues */
for (qi = 0; qi < nb_txq; qi++) {
+ port->tx_conf[qi].txq_flags =
+ ETH_TXQ_FLAGS_IGNORE;
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
- nb_txd,txring_numa[pi],
- &(port->tx_conf));
+ port->nb_tx_desc[qi],
+ txring_numa[pi],
+ &(port->tx_conf[qi]));
else
diag = rte_eth_tx_queue_setup(pi, qi,
- nb_txd,port->socket_id,
- &(port->tx_conf));
+ port->nb_tx_desc[qi],
+ port->socket_id,
+ &(port->tx_conf[qi]));
if (diag == 0)
continue;
@@ -1610,15 +1668,14 @@ start_port(portid_t pid)
RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back "
"to stopped\n", pi);
- printf("Fail to configure port %d tx queues\n", pi);
+ printf("Fail to configure port %d tx queues\n",
+ pi);
/* try to reconfigure queues next time */
port->need_reconfig_queues = 1;
return -1;
}
- /* Apply Rx offloads configuration */
- port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
- /* setup rx queues */
for (qi = 0; qi < nb_rxq; qi++) {
+ /* setup rx queues */
if ((numa_support) &&
(rxring_numa[pi] != NUMA_NO_CONFIG)) {
struct rte_mempool * mp =
@@ -1632,8 +1689,10 @@ start_port(portid_t pid)
}
diag = rte_eth_rx_queue_setup(pi, qi,
- nb_rxd,rxring_numa[pi],
- &(port->rx_conf),mp);
+ port->nb_rx_desc[qi],
+ rxring_numa[pi],
+ &(port->rx_conf[qi]),
+ mp);
} else {
struct rte_mempool *mp =
mbuf_pool_find(port->socket_id);
@@ -1645,8 +1704,10 @@ start_port(portid_t pid)
return -1;
}
diag = rte_eth_rx_queue_setup(pi, qi,
- nb_rxd,port->socket_id,
- &(port->rx_conf), mp);
+ port->nb_rx_desc[qi],
+ port->socket_id,
+ &(port->rx_conf[qi]),
+ mp);
}
if (diag == 0)
continue;
@@ -1657,7 +1718,8 @@ start_port(portid_t pid)
RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back "
"to stopped\n", pi);
- printf("Fail to configure port %d rx queues\n", pi);
+ printf("Fail to configure port %d rx queues\n",
+ pi);
/* try to reconfigure queues next time */
port->need_reconfig_queues = 1;
return -1;
@@ -1853,6 +1915,39 @@ reset_port(portid_t pid)
printf("Done\n");
}
+static int
+eth_dev_event_callback_register(void)
+{
+ int ret;
+
+ /* register the device event callback */
+ ret = rte_dev_event_callback_register(NULL,
+ eth_dev_event_callback, NULL);
+ if (ret) {
+ printf("Failed to register device event callback\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int
+eth_dev_event_callback_unregister(void)
+{
+ int ret;
+
+ /* unregister the device event callback */
+ ret = rte_dev_event_callback_unregister(NULL,
+ eth_dev_event_callback, NULL);
+ if (ret < 0) {
+ printf("Failed to unregister device event callback\n");
+ return -1;
+ }
+
+ return 0;
+}
+
void
attach_port(char *identifier)
{
@@ -1876,10 +1971,12 @@ attach_port(char *identifier)
reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
ports[pi].port_status = RTE_PORT_STOPPED;
+ update_fwd_ports(pi);
+
printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
printf("Done\n");
}
@@ -1900,14 +1997,16 @@ detach_port(portid_t port_id)
port_flow_flush(port_id);
if (rte_eth_dev_detach(port_id, name)) {
- TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
+ TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
return;
}
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
- printf("Port '%s' is detached. Now total ports is %d\n",
- name, nb_ports);
+ update_fwd_ports(RTE_MAX_ETHPORTS);
+
+ printf("Port %u is detached. Now total ports is %d\n",
+ port_id, nb_ports);
printf("Done\n");
return;
}
@@ -1915,7 +2014,9 @@ detach_port(portid_t port_id)
void
pmd_test_exit(void)
{
+ struct rte_device *device;
portid_t pt_id;
+ int ret;
if (test_done == 0)
stop_packet_forwarding();
@@ -1927,8 +2028,33 @@ pmd_test_exit(void)
fflush(stdout);
stop_port(pt_id);
close_port(pt_id);
+
+ /*
+ * This is a workaround to fix a virtio-user issue that
+ * requires calling a clean-up routine to remove the
+ * existing socket.
+ * This workaround is valid only for testpmd; a fix valid
+ * for all applications is still needed.
+ * TODO: Implement proper resource cleanup
+ */
+ device = rte_eth_devices[pt_id].device;
+ if (device && !strcmp(device->driver->name, "net_virtio_user"))
+ detach_port(pt_id);
}
}
+
+ if (hot_plug) {
+ ret = rte_dev_event_monitor_stop();
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "fail to stop device event monitor.");
+
+ ret = eth_dev_event_callback_unregister();
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "fail to unregister all event callbacks.");
+ }
+
printf("\nBye...\n");
}
@@ -1999,18 +2125,23 @@ check_all_ports_link_status(uint32_t port_mask)
static void
rmv_event_callback(void *arg)
{
- struct rte_eth_dev *dev;
+ int need_to_start = 0;
+ int org_no_link_check = no_link_check;
portid_t port_id = (intptr_t)arg;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
- dev = &rte_eth_devices[port_id];
+ if (!test_done && port_is_forwarding(port_id)) {
+ need_to_start = 1;
+ stop_packet_forwarding();
+ }
+ no_link_check = 1;
stop_port(port_id);
+ no_link_check = org_no_link_check;
close_port(port_id);
- printf("removing device %s\n", dev->device->name);
- if (rte_eal_dev_detach(dev->device))
- TESTPMD_LOG(ERR, "Failed to detach device %s\n",
- dev->device->name);
+ detach_port(port_id);
+ if (need_to_start)
+ start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
@@ -2024,6 +2155,7 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
+ [RTE_ETH_EVENT_IPSEC] = "IPsec",
[RTE_ETH_EVENT_MACSEC] = "MACsec",
[RTE_ETH_EVENT_INTR_RMV] = "device removal",
[RTE_ETH_EVENT_NEW] = "device probed",
@@ -2059,6 +2191,37 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
return 0;
}
+/* This function is used by the interrupt thread */
+static void
+eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
+ __rte_unused void *arg)
+{
+ if (type >= RTE_DEV_EVENT_MAX) {
+ fprintf(stderr, "%s called upon invalid event %d\n",
+ __func__, type);
+ fflush(stderr);
+ }
+
+ switch (type) {
+ case RTE_DEV_EVENT_REMOVE:
+ RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
+ device_name);
+ /* TODO: After failure handling finishes, stop packet
+ * forwarding, stop the port, close the port, detach the port.
+ */
+ break;
+ case RTE_DEV_EVENT_ADD:
+ RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
+ device_name);
+ /* TODO: After kernel driver binding finishes,
+ * attach the port.
+ */
+ break;
+ default:
+ break;
+ }
+}
+
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
@@ -2140,39 +2303,51 @@ map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
static void
rxtx_port_config(struct rte_port *port)
{
- port->rx_conf = port->dev_info.default_rxconf;
- port->tx_conf = port->dev_info.default_txconf;
+ uint16_t qid;
- /* Check if any RX/TX parameters have been passed */
- if (rx_pthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_thresh.pthresh = rx_pthresh;
+ for (qid = 0; qid < nb_rxq; qid++) {
+ port->rx_conf[qid] = port->dev_info.default_rxconf;
- if (rx_hthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_thresh.hthresh = rx_hthresh;
+ /* Check if any Rx parameters have been passed */
+ if (rx_pthresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
- if (rx_wthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_thresh.wthresh = rx_wthresh;
+ if (rx_hthresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
- if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_free_thresh = rx_free_thresh;
+ if (rx_wthresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
- if (rx_drop_en != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_drop_en = rx_drop_en;
+ if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
+
+ if (rx_drop_en != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_drop_en = rx_drop_en;
+
+ port->nb_rx_desc[qid] = nb_rxd;
+ }
- if (tx_pthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_thresh.pthresh = tx_pthresh;
+ for (qid = 0; qid < nb_txq; qid++) {
+ port->tx_conf[qid] = port->dev_info.default_txconf;
- if (tx_hthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_thresh.hthresh = tx_hthresh;
+ /* Check if any Tx parameters have been passed */
+ if (tx_pthresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
- if (tx_wthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_thresh.wthresh = tx_wthresh;
+ if (tx_hthresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
- if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_rs_thresh = tx_rs_thresh;
+ if (tx_wthresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
- if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_free_thresh = tx_free_thresh;
+ if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
+
+ if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
+
+ port->nb_tx_desc[qid] = nb_txd;
+ }
}
void
@@ -2180,13 +2355,16 @@ init_port_config(void)
{
portid_t pid;
struct rte_port *port;
+ struct rte_eth_dev_info dev_info;
RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
port->dev_conf.fdir_conf = fdir_conf;
if (nb_rxq > 1) {
+ rte_eth_dev_info_get(pid, &dev_info);
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
- port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
+ port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+ rss_hf & dev_info.flow_type_rss_offloads;
} else {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
@@ -2251,7 +2429,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid)
struct rte_port *port;
port = &ports[slave_pid];
- return port->slave_flag;
+ if ((rte_eth_devices[slave_pid].data->dev_flags &
+ RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
+ return 1;
+ return 0;
}
const uint16_t vlan_tags[] = {
@@ -2354,12 +2535,8 @@ init_port_dcb_config(portid_t pid,
return retval;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
- /**
- * Write the configuration into the device.
- * Set the numbers of RX & TX queues to 0, so
- * the RX & TX queues will not be setup.
- */
- rte_eth_dev_configure(pid, 0, 0, &port_conf);
+ /* re-configure the device. */
+ rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
rte_eth_dev_info_get(pid, &rte_port->dev_info);
@@ -2474,8 +2651,9 @@ signal_handler(int signum)
int
main(int argc, char** argv)
{
- int diag;
+ int diag;
portid_t port_id;
+ int ret;
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
@@ -2489,17 +2667,12 @@ main(int argc, char** argv)
rte_panic("Cannot register log type");
rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
- if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
- TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
- strerror(errno));
- }
-
#ifdef RTE_LIBRTE_PDUMP
/* initialize packet capture framework */
rte_pdump_init(NULL);
#endif
- nb_ports = (portid_t) rte_eth_dev_count();
+ nb_ports = (portid_t) rte_eth_dev_count_avail();
if (nb_ports == 0)
TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
@@ -2519,11 +2692,23 @@ main(int argc, char** argv)
latencystats_enabled = 0;
#endif
+ /* on FreeBSD, mlockall() is disabled by default */
+#ifdef RTE_EXEC_ENV_BSDAPP
+ do_mlockall = 0;
+#else
+ do_mlockall = 1;
+#endif
+
argc -= diag;
argv += diag;
if (argc > 1)
launch_args_parse(argc, argv);
+ if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
+ TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
+ strerror(errno));
+ }
+
if (tx_first && interactive)
rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
"interactive mode.\n");
@@ -2543,6 +2728,18 @@ main(int argc, char** argv)
nb_rxq, nb_txq);
init_config();
+
+ if (hot_plug) {
+ /* enable hot plug monitoring */
+ ret = rte_dev_event_monitor_start();
+ if (ret) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ eth_dev_event_callback_register();
+
+ }
+
if (start_port(RTE_PORT_ALL) != 0)
rte_exit(EXIT_FAILURE, "Start ports failed\n");
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 153abea0..f51cd9dd 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -79,6 +79,19 @@ struct pkt_burst_stats {
};
#endif
+/** Information for a given RSS type. */
+struct rss_type_info {
+ const char *str; /**< Type name. */
+ uint64_t rss_type; /**< Type value. */
+};
+
+/**
+ * RSS type information table.
+ *
+ * An entry with a NULL type name terminates the list.
+ */
+extern const struct rss_type_info rss_type_table[];
+
/**
* The data structure associated with a forwarding stream between a receive
* port/queue and a transmit port/queue.
@@ -181,8 +194,10 @@ struct rte_port {
uint8_t need_reconfig_queues; /**< need reconfiguring queues or not */
uint8_t rss_flag; /**< enable rss or not */
uint8_t dcb_flag; /**< enable dcb */
- struct rte_eth_rxconf rx_conf; /**< rx configuration */
- struct rte_eth_txconf tx_conf; /**< tx configuration */
+ uint16_t nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */
+ uint16_t nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */
+ struct rte_eth_rxconf rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */
+ struct rte_eth_txconf tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */
struct ether_addr *mc_addr_pool; /**< pool of multicast addrs */
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
uint8_t slave_flag; /**< bonding slave port */
@@ -320,6 +335,8 @@ extern uint8_t lsc_interrupt; /**< disabled by "--no-lsc-interrupt" parameter */
extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */
extern uint32_t event_print_mask;
/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */
+extern uint8_t hot_plug; /**< enabled by "--hot-plug" parameter */
+extern int do_mlockall; /**< set by "--mlockall" or "--no-mlockall" parameter */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
@@ -433,6 +450,8 @@ extern uint32_t retry_enabled;
extern struct fwd_lcore **fwd_lcores;
extern struct fwd_stream **fwd_streams;
+extern uint16_t vxlan_gpe_udp_port; /**< UDP port of tunnel VXLAN-GPE. */
+
extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
@@ -500,12 +519,25 @@ mbuf_pool_find(unsigned int sock_id)
static inline uint32_t
port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
{
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
void *reg_addr;
uint32_t reg_v;
- reg_addr = (void *)
- ((char *)port->dev_info.pci_dev->mem_resource[0].addr +
- reg_off);
+ if (!port->dev_info.device) {
+ printf("Invalid device\n");
+ return 0;
+ }
+
+ bus = rte_bus_find_by_device(port->dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(port->dev_info.device);
+ } else {
+ printf("Not a PCI device\n");
+ return 0;
+ }
+
+ reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off);
reg_v = *((volatile uint32_t *)reg_addr);
return rte_le_to_cpu_32(reg_v);
}
@@ -516,11 +548,24 @@ port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
static inline void
port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v)
{
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
void *reg_addr;
- reg_addr = (void *)
- ((char *)port->dev_info.pci_dev->mem_resource[0].addr +
- reg_off);
+ if (!port->dev_info.device) {
+ printf("Invalid device\n");
+ return;
+ }
+
+ bus = rte_bus_find_by_device(port->dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(port->dev_info.device);
+ } else {
+ printf("Not a PCI device\n");
+ return;
+ }
+
+ reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off);
*((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v);
}
@@ -551,6 +596,7 @@ void fwd_config_setup(void);
void set_def_fwd_config(void);
void reconfig(portid_t new_port_id, unsigned socket_id);
int init_fwd_streams(void);
+void update_fwd_ports(portid_t new_pid);
void set_fwd_eth_peer(portid_t port_id, char *peer_addr);
@@ -575,7 +621,7 @@ int port_flow_create(portid_t port_id,
int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule);
int port_flow_flush(portid_t port_id);
int port_flow_query(portid_t port_id, uint32_t rule,
- enum rte_flow_action_type action);
+ const struct rte_flow_action *action);
void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group);
int port_flow_isolate(portid_t port_id, int set);
@@ -681,6 +727,7 @@ enum print_warning {
DISABLED_WARN
};
int port_id_is_invalid(portid_t port_id, enum print_warning warning);
+void print_valid_ports(void);
int new_socket_id(unsigned int socket_id);
queueid_t get_allowed_max_nb_rxq(portid_t *pid);