Diffstat (limited to 'app/test-pmd')
-rw-r--r--  app/test-pmd/Makefile                                        |    5
-rw-r--r--  app/test-pmd/bpf_cmd.c                                       |   27
-rw-r--r--  app/test-pmd/cmdline.c                                       |  410
-rw-r--r--  app/test-pmd/cmdline_flow.c                                  |  303
-rw-r--r--  app/test-pmd/cmdline_tm.c                                    |   37
-rw-r--r--  app/test-pmd/config.c                                        |   69
-rw-r--r--  app/test-pmd/csumonly.c                                      |    2
-rw-r--r--  app/test-pmd/meson.build                                     |    6
-rw-r--r--  app/test-pmd/parameters.c                                    |    4
-rw-r--r--  app/test-pmd/softnicfwd.c (renamed from app/test-pmd/tm.c)   |  408
-rw-r--r--  app/test-pmd/testpmd.c                                       |  112
-rw-r--r--  app/test-pmd/testpmd.h                                       |   76
12 files changed, 1057 insertions, 402 deletions
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index a5a827bb..2b4d604b 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -13,6 +13,7 @@ APP = testpmd
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-deprecated-declarations
#
# all source are stored in SRCS-y
@@ -35,8 +36,8 @@ SRCS-y += icmpecho.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_cmd.c
-ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC)$(CONFIG_RTE_LIBRTE_SCHED),yy)
-SRCS-y += tm.c
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC), y)
+SRCS-y += softnicfwd.c
endif
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
diff --git a/app/test-pmd/bpf_cmd.c b/app/test-pmd/bpf_cmd.c
index 584fad90..830bfc13 100644
--- a/app/test-pmd/bpf_cmd.c
+++ b/app/test-pmd/bpf_cmd.c
@@ -19,12 +19,35 @@ static const struct rte_bpf_xsym bpf_xsym[] = {
{
.name = RTE_STR(stdout),
.type = RTE_BPF_XTYPE_VAR,
- .var = &stdout,
+ .var = {
+ .val = &stdout,
+ .desc = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(stdout),
+ },
+ },
},
{
.name = RTE_STR(rte_pktmbuf_dump),
.type = RTE_BPF_XTYPE_FUNC,
- .func = (void *)rte_pktmbuf_dump,
+ .func = {
+ .val = (void *)rte_pktmbuf_dump,
+ .nb_args = 3,
+ .args = {
+ [0] = {
+ .type = RTE_BPF_ARG_RAW,
+ .size = sizeof(uintptr_t),
+ },
+ [1] = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .size = sizeof(struct rte_mbuf),
+ },
+ [2] = {
+ .type = RTE_BPF_ARG_RAW,
+ .size = sizeof(uint32_t),
+ },
+ },
+ },
},
};
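The bpf_cmd.c hunk moves the external-symbol table to the richer rte_bpf_xsym layout, where a variable carries a pointer descriptor and a function describes each of its arguments. A minimal sketch follows, for a hypothetical helper my_dump() (not part of the patch), using only the fields the hunk above exercises:

#include <inttypes.h>
#include <stdio.h>
#include <rte_common.h>
#include <rte_bpf.h>
#include <rte_mbuf.h>

/* Hypothetical helper made callable from a BPF program. */
static void
my_dump(uintptr_t flags, struct rte_mbuf *m)
{
	printf("flags=%" PRIxPTR " pkt_len=%" PRIu32 "\n", flags, m->pkt_len);
}

/* Sketch of an rte_bpf_xsym entry describing my_dump()'s prototype:
 * a raw scalar first argument and an mbuf pointer second argument. */
static const struct rte_bpf_xsym my_xsym = {
	.name = RTE_STR(my_dump),
	.type = RTE_BPF_XTYPE_FUNC,
	.func = {
		.val = (void *)my_dump,
		.nb_args = 2,
		.args = {
			[0] = {
				.type = RTE_BPF_ARG_RAW,      /* plain scalar */
				.size = sizeof(uintptr_t),
			},
			[1] = {
				.type = RTE_BPF_ARG_PTR_MBUF, /* checked as mbuf pointer */
				.size = sizeof(struct rte_mbuf),
			},
		},
	},
};

The explicit argument descriptors are what lets the BPF verifier and JIT validate calls into the helper instead of treating the symbol as an opaque address.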
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 27e2aa8c..589121d6 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -781,6 +781,27 @@ static void cmd_help_long_parsed(void *parsed_result,
"port tm hierarchy commit (port_id) (clean_on_fail)\n"
" Commit tm hierarchy.\n\n"
+ "vxlan ip-version (ipv4|ipv6) vni (vni) udp-src"
+ " (udp-src) udp-dst (udp-dst) ip-src (ip-src) ip-dst"
+ " (ip-dst) eth-src (eth-src) eth-dst (eth-dst)\n"
+ " Configure the VXLAN encapsulation for flows.\n\n"
+
+ "vxlan-with-vlan ip-version (ipv4|ipv6) vni (vni)"
+ " udp-src (udp-src) udp-dst (udp-dst) ip-src (ip-src)"
+ " ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src)"
+ " eth-dst (eth-dst)\n"
+ " Configure the VXLAN encapsulation for flows.\n\n"
+
+ "nvgre ip-version (ipv4|ipv6) tni (tni) ip-src"
+ " (ip-src) ip-dst (ip-dst) eth-src (eth-src) eth-dst"
+ " (eth-dst)\n"
+ " Configure the NVGRE encapsulation for flows.\n\n"
+
+ "nvgre-with-vlan ip-version (ipv4|ipv6) tni (tni)"
+ " ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci)"
+ " eth-src (eth-src) eth-dst (eth-dst)\n"
+ " Configure the NVGRE encapsulation for flows.\n\n"
+
, list_pkt_forwarding_modes()
);
}
@@ -1876,11 +1897,13 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
port = &ports[pid];
rx_offloads = port->dev_conf.rxmode.offloads;
if (!strcmp(res->name, "crc-strip")) {
- if (!strcmp(res->value, "on"))
+ if (!strcmp(res->value, "on")) {
rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- else if (!strcmp(res->value, "off"))
+ rx_offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ } else if (!strcmp(res->value, "off")) {
+ rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
- else {
+ } else {
printf("Unknown parameter\n");
return;
}
@@ -2058,11 +2081,21 @@ cmd_config_rss_parsed(void *parsed_result,
rss_conf.rss_key = NULL;
/* Update global configuration for RSS types. */
RTE_ETH_FOREACH_DEV(i) {
- if (use_default) {
- rte_eth_dev_info_get(i, &dev_info);
+ struct rte_eth_rss_conf local_rss_conf;
+
+ rte_eth_dev_info_get(i, &dev_info);
+ if (use_default)
rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+ local_rss_conf = rss_conf;
+ local_rss_conf.rss_hf = rss_conf.rss_hf &
+ dev_info.flow_type_rss_offloads;
+ if (local_rss_conf.rss_hf != rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i, rss_conf.rss_hf, local_rss_conf.rss_hf);
}
- diag = rte_eth_dev_rss_hash_update(i, &rss_conf);
+ diag = rte_eth_dev_rss_hash_update(i, &local_rss_conf);
if (diag < 0) {
all_updated = 0;
printf("Configuration of RSS hash at ethernet port %d "
@@ -4792,8 +4825,9 @@ cmd_gso_show_parsed(void *parsed_result,
if (gso_ports[res->cmd_pid].enable) {
printf("Max GSO'd packet size: %uB\n"
"Supported GSO types: TCP/IPv4, "
- "VxLAN with inner TCP/IPv4 packet, "
- "GRE with inner TCP/IPv4 packet\n",
+ "UDP/IPv4, VxLAN with inner "
+ "TCP/IPv4 packet, GRE with inner "
+ "TCP/IPv4 packet\n",
gso_max_segment_size);
} else
printf("GSO is not enabled on Port %u\n", res->cmd_pid);
@@ -14806,20 +14840,14 @@ static void cmd_set_port_tm_hierarchy_default_parsed(void *parsed_result,
p = &ports[port_id];
- /* Port tm flag */
- if (p->softport.tm_flag == 0) {
- printf(" tm not enabled on port %u (error)\n", port_id);
- return;
- }
-
/* Forward mode: tm */
- if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "tm")) {
- printf(" tm mode not enabled(error)\n");
+ if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "softnic")) {
+ printf(" softnicfwd mode not enabled(error)\n");
return;
}
/* Set the default tm hierarchy */
- p->softport.tm.default_hierarchy_enable = 1;
+ p->softport.default_tm_hierarchy_enable = 1;
}
cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = {
@@ -14838,6 +14866,326 @@ cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = {
};
#endif
+/** Set VXLAN encapsulation details */
+struct cmd_set_vxlan_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t vxlan;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t vlan_present:1;
+ uint32_t vni;
+ uint16_t udp_src;
+ uint16_t udp_dst;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t tci;
+ struct ether_addr eth_src;
+ struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_vxlan_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, set, "set");
+cmdline_parse_token_string_t cmd_set_vxlan_vxlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, "vxlan");
+cmdline_parse_token_string_t cmd_set_vxlan_vxlan_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan,
+ "vxlan-with-vlan");
+cmdline_parse_token_string_t cmd_set_vxlan_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "ip-version");
+cmdline_parse_token_string_t cmd_set_vxlan_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, ip_version,
+ "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_vxlan_vni =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "vni");
+cmdline_parse_token_num_t cmd_set_vxlan_vni_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, vni, UINT32);
+cmdline_parse_token_string_t cmd_set_vxlan_udp_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "udp-src");
+cmdline_parse_token_num_t cmd_set_vxlan_udp_src_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_src, UINT16);
+cmdline_parse_token_string_t cmd_set_vxlan_udp_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "udp-dst");
+cmdline_parse_token_num_t cmd_set_vxlan_udp_dst_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_dst, UINT16);
+cmdline_parse_token_string_t cmd_set_vxlan_ip_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "ip-src");
+cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_src_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_src);
+cmdline_parse_token_string_t cmd_set_vxlan_ip_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "ip-dst");
+cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_dst_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_dst);
+cmdline_parse_token_string_t cmd_set_vxlan_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "vlan-tci");
+cmdline_parse_token_num_t cmd_set_vxlan_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, tci, UINT16);
+cmdline_parse_token_string_t cmd_set_vxlan_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_src);
+cmdline_parse_token_string_t cmd_set_vxlan_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_dst);
+
+static void cmd_set_vxlan_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_vxlan_result *res = parsed_result;
+ union {
+ uint32_t vxlan_id;
+ uint8_t vni[4];
+ } id = {
+ .vxlan_id = rte_cpu_to_be_32(res->vni) & RTE_BE32(0x00ffffff),
+ };
+
+ if (strcmp(res->vxlan, "vxlan") == 0)
+ vxlan_encap_conf.select_vlan = 0;
+ else if (strcmp(res->vxlan, "vxlan-with-vlan") == 0)
+ vxlan_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ vxlan_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ vxlan_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ rte_memcpy(vxlan_encap_conf.vni, &id.vni[1], 3);
+ vxlan_encap_conf.udp_src = rte_cpu_to_be_16(res->udp_src);
+ vxlan_encap_conf.udp_dst = rte_cpu_to_be_16(res->udp_dst);
+ if (vxlan_encap_conf.select_ipv4) {
+ IPV4_ADDR_TO_UINT(res->ip_src, vxlan_encap_conf.ipv4_src);
+ IPV4_ADDR_TO_UINT(res->ip_dst, vxlan_encap_conf.ipv4_dst);
+ } else {
+ IPV6_ADDR_TO_ARRAY(res->ip_src, vxlan_encap_conf.ipv6_src);
+ IPV6_ADDR_TO_ARRAY(res->ip_dst, vxlan_encap_conf.ipv6_dst);
+ }
+ if (vxlan_encap_conf.select_vlan)
+ vxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_vxlan = {
+ .f = cmd_set_vxlan_parsed,
+ .data = NULL,
+ .help_str = "set vxlan ip-version ipv4|ipv6 vni <vni> udp-src"
+ " <udp-src> udp-dst <udp-dst> ip-src <ip-src> ip-dst <ip-dst>"
+ " eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_vxlan_set,
+ (void *)&cmd_set_vxlan_vxlan,
+ (void *)&cmd_set_vxlan_ip_version,
+ (void *)&cmd_set_vxlan_ip_version_value,
+ (void *)&cmd_set_vxlan_vni,
+ (void *)&cmd_set_vxlan_vni_value,
+ (void *)&cmd_set_vxlan_udp_src,
+ (void *)&cmd_set_vxlan_udp_src_value,
+ (void *)&cmd_set_vxlan_udp_dst,
+ (void *)&cmd_set_vxlan_udp_dst_value,
+ (void *)&cmd_set_vxlan_ip_src,
+ (void *)&cmd_set_vxlan_ip_src_value,
+ (void *)&cmd_set_vxlan_ip_dst,
+ (void *)&cmd_set_vxlan_ip_dst_value,
+ (void *)&cmd_set_vxlan_eth_src,
+ (void *)&cmd_set_vxlan_eth_src_value,
+ (void *)&cmd_set_vxlan_eth_dst,
+ (void *)&cmd_set_vxlan_eth_dst_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_vxlan_with_vlan = {
+ .f = cmd_set_vxlan_parsed,
+ .data = NULL,
+ .help_str = "set vxlan-with-vlan ip-version ipv4|ipv6 vni <vni>"
+ " udp-src <udp-src> udp-dst <udp-dst> ip-src <ip-src> ip-dst"
+ " <ip-dst> vlan-tci <vlan-tci> eth-src <eth-src> eth-dst"
+ " <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_vxlan_set,
+ (void *)&cmd_set_vxlan_vxlan_with_vlan,
+ (void *)&cmd_set_vxlan_ip_version,
+ (void *)&cmd_set_vxlan_ip_version_value,
+ (void *)&cmd_set_vxlan_vni,
+ (void *)&cmd_set_vxlan_vni_value,
+ (void *)&cmd_set_vxlan_udp_src,
+ (void *)&cmd_set_vxlan_udp_src_value,
+ (void *)&cmd_set_vxlan_udp_dst,
+ (void *)&cmd_set_vxlan_udp_dst_value,
+ (void *)&cmd_set_vxlan_ip_src,
+ (void *)&cmd_set_vxlan_ip_src_value,
+ (void *)&cmd_set_vxlan_ip_dst,
+ (void *)&cmd_set_vxlan_ip_dst_value,
+ (void *)&cmd_set_vxlan_vlan,
+ (void *)&cmd_set_vxlan_vlan_value,
+ (void *)&cmd_set_vxlan_eth_src,
+ (void *)&cmd_set_vxlan_eth_src_value,
+ (void *)&cmd_set_vxlan_eth_dst,
+ (void *)&cmd_set_vxlan_eth_dst_value,
+ NULL,
+ },
+};
+
+/** Set NVGRE encapsulation details */
+struct cmd_set_nvgre_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t nvgre;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t tni;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t tci;
+ struct ether_addr eth_src;
+ struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_nvgre_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, set, "set");
+cmdline_parse_token_string_t cmd_set_nvgre_nvgre =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre, "nvgre");
+cmdline_parse_token_string_t cmd_set_nvgre_nvgre_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre,
+ "nvgre-with-vlan");
+cmdline_parse_token_string_t cmd_set_nvgre_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "ip-version");
+cmdline_parse_token_string_t cmd_set_nvgre_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, ip_version,
+ "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_nvgre_tni =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "tni");
+cmdline_parse_token_num_t cmd_set_nvgre_tni_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tni, UINT32);
+cmdline_parse_token_string_t cmd_set_nvgre_ip_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "ip-src");
+cmdline_parse_token_ipaddr_t cmd_set_nvgre_ip_src_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_src);
+cmdline_parse_token_string_t cmd_set_nvgre_ip_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "ip-dst");
+cmdline_parse_token_ipaddr_t cmd_set_nvgre_ip_dst_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_dst);
+cmdline_parse_token_string_t cmd_set_nvgre_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "vlan-tci");
+cmdline_parse_token_num_t cmd_set_nvgre_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tci, UINT16);
+cmdline_parse_token_string_t cmd_set_nvgre_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_src);
+cmdline_parse_token_string_t cmd_set_nvgre_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_dst);
+
+static void cmd_set_nvgre_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_nvgre_result *res = parsed_result;
+ union {
+ uint32_t nvgre_tni;
+ uint8_t tni[4];
+ } id = {
+ .nvgre_tni = rte_cpu_to_be_32(res->tni) & RTE_BE32(0x00ffffff),
+ };
+
+ if (strcmp(res->nvgre, "nvgre") == 0)
+ nvgre_encap_conf.select_vlan = 0;
+ else if (strcmp(res->nvgre, "nvgre-with-vlan") == 0)
+ nvgre_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ nvgre_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ nvgre_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ rte_memcpy(nvgre_encap_conf.tni, &id.tni[1], 3);
+ if (nvgre_encap_conf.select_ipv4) {
+ IPV4_ADDR_TO_UINT(res->ip_src, nvgre_encap_conf.ipv4_src);
+ IPV4_ADDR_TO_UINT(res->ip_dst, nvgre_encap_conf.ipv4_dst);
+ } else {
+ IPV6_ADDR_TO_ARRAY(res->ip_src, nvgre_encap_conf.ipv6_src);
+ IPV6_ADDR_TO_ARRAY(res->ip_dst, nvgre_encap_conf.ipv6_dst);
+ }
+ if (nvgre_encap_conf.select_vlan)
+ nvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_nvgre = {
+ .f = cmd_set_nvgre_parsed,
+ .data = NULL,
+ .help_str = "set nvgre ip-version <ipv4|ipv6> tni <tni> ip-src"
+ " <ip-src> ip-dst <ip-dst> eth-src <eth-src>"
+ " eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_nvgre_set,
+ (void *)&cmd_set_nvgre_nvgre,
+ (void *)&cmd_set_nvgre_ip_version,
+ (void *)&cmd_set_nvgre_ip_version_value,
+ (void *)&cmd_set_nvgre_tni,
+ (void *)&cmd_set_nvgre_tni_value,
+ (void *)&cmd_set_nvgre_ip_src,
+ (void *)&cmd_set_nvgre_ip_src_value,
+ (void *)&cmd_set_nvgre_ip_dst,
+ (void *)&cmd_set_nvgre_ip_dst_value,
+ (void *)&cmd_set_nvgre_eth_src,
+ (void *)&cmd_set_nvgre_eth_src_value,
+ (void *)&cmd_set_nvgre_eth_dst,
+ (void *)&cmd_set_nvgre_eth_dst_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_nvgre_with_vlan = {
+ .f = cmd_set_nvgre_parsed,
+ .data = NULL,
+ .help_str = "set nvgre-with-vlan ip-version <ipv4|ipv6> tni <tni>"
+ " ip-src <ip-src> ip-dst <ip-dst> vlan-tci <vlan-tci>"
+ " eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_nvgre_set,
+ (void *)&cmd_set_nvgre_nvgre_with_vlan,
+ (void *)&cmd_set_nvgre_ip_version,
+ (void *)&cmd_set_nvgre_ip_version_value,
+ (void *)&cmd_set_nvgre_tni,
+ (void *)&cmd_set_nvgre_tni_value,
+ (void *)&cmd_set_nvgre_ip_src,
+ (void *)&cmd_set_nvgre_ip_src_value,
+ (void *)&cmd_set_nvgre_ip_dst,
+ (void *)&cmd_set_nvgre_ip_dst_value,
+ (void *)&cmd_set_nvgre_vlan,
+ (void *)&cmd_set_nvgre_vlan_value,
+ (void *)&cmd_set_nvgre_eth_src,
+ (void *)&cmd_set_nvgre_eth_src_value,
+ (void *)&cmd_set_nvgre_eth_dst,
+ (void *)&cmd_set_nvgre_eth_dst_value,
+ NULL,
+ },
+};
+
/* Strict link priority scheduling mode setting */
static void
cmd_strict_link_prio_parsed(
@@ -16657,7 +17005,7 @@ cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_offload =
offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
"qinq_strip#outer_ipv4_cksum#macsec_strip#"
"header_split#vlan_filter#vlan_extend#jumbo_frame#"
- "crc_strip#scatter#timestamp#security");
+ "crc_strip#scatter#timestamp#security#keep_crc");
cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_port_rx_offload_result,
@@ -16736,7 +17084,7 @@ cmdline_parse_inst_t cmd_config_per_port_rx_offload = {
.help_str = "port config <port_id> rx_offload vlan_strip|ipv4_cksum|"
"udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
"macsec_strip|header_split|vlan_filter|vlan_extend|"
- "jumbo_frame|crc_strip|scatter|timestamp|security "
+ "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc "
"on|off",
.tokens = {
(void *)&cmd_config_per_port_rx_offload_result_port,
@@ -16786,7 +17134,7 @@ cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_offload =
offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
"qinq_strip#outer_ipv4_cksum#macsec_strip#"
"header_split#vlan_filter#vlan_extend#jumbo_frame#"
- "crc_strip#scatter#timestamp#security");
+ "crc_strip#scatter#timestamp#security#keep_crc");
cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_queue_rx_offload_result,
@@ -16838,7 +17186,7 @@ cmdline_parse_inst_t cmd_config_per_queue_rx_offload = {
"vlan_strip|ipv4_cksum|"
"udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
"macsec_strip|header_split|vlan_filter|vlan_extend|"
- "jumbo_frame|crc_strip|scatter|timestamp|security "
+ "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc "
"on|off",
.tokens = {
(void *)&cmd_config_per_queue_rx_offload_result_port,
@@ -17051,11 +17399,11 @@ cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_tx_offload =
cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_offload =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_port_tx_offload_result,
- offload, "vlan_insert#ipv4_cksum#udp_cksum#udp_cksum#"
+ offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#"
"sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#"
"qinq_insert#vxlan_tnl_tso#gre_tnl_tso#"
"ipip_tnl_tso#geneve_tnl_tso#macsec_insert#"
- "mt_lockfree#multi_segs#fast_free#security");
+ "mt_lockfree#multi_segs#mbuf_fast_free#security");
cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_port_tx_offload_result,
@@ -17132,11 +17480,11 @@ cmdline_parse_inst_t cmd_config_per_port_tx_offload = {
.f = cmd_config_per_port_tx_offload_parsed,
.data = NULL,
.help_str = "port config <port_id> tx_offload "
- "vlan_insert|ipv4_cksum|udp_cksum|udp_cksum|"
+ "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|"
"sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|"
"qinq_insert|vxlan_tnl_tso|gre_tnl_tso|"
"ipip_tnl_tso|geneve_tnl_tso|macsec_insert|"
- "mt_lockfree|multi_segs|fast_free|security "
+ "mt_lockfree|multi_segs|mbuf_fast_free|security "
"on|off",
.tokens = {
(void *)&cmd_config_per_port_tx_offload_result_port,
@@ -17183,11 +17531,11 @@ cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txoffload =
cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_offload =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_queue_tx_offload_result,
- offload, "vlan_insert#ipv4_cksum#udp_cksum#udp_cksum#"
+ offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#"
"sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#"
"qinq_insert#vxlan_tnl_tso#gre_tnl_tso#"
"ipip_tnl_tso#geneve_tnl_tso#macsec_insert#"
- "mt_lockfree#multi_segs#fast_free#security");
+ "mt_lockfree#multi_segs#mbuf_fast_free#security");
cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_queue_tx_offload_result,
@@ -17236,11 +17584,11 @@ cmdline_parse_inst_t cmd_config_per_queue_tx_offload = {
.f = cmd_config_per_queue_tx_offload_parsed,
.data = NULL,
.help_str = "port <port_id> txq <queue_id> tx_offload "
- "vlan_insert|ipv4_cksum|udp_cksum|udp_cksum|"
+ "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|"
"sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|"
"qinq_insert|vxlan_tnl_tso|gre_tnl_tso|"
"ipip_tnl_tso|geneve_tnl_tso|macsec_insert|"
- "mt_lockfree|multi_segs|fast_free|security "
+ "mt_lockfree|multi_segs|mbuf_fast_free|security "
"on|off",
.tokens = {
(void *)&cmd_config_per_queue_tx_offload_result_port,
@@ -17462,6 +17810,10 @@ cmdline_parse_ctx_t main_ctx[] = {
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
(cmdline_parse_inst_t *)&cmd_set_port_tm_hierarchy_default,
#endif
+ (cmdline_parse_inst_t *)&cmd_set_vxlan,
+ (cmdline_parse_inst_t *)&cmd_set_vxlan_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_nvgre,
+ (cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan,
(cmdline_parse_inst_t *)&cmd_ddp_add,
(cmdline_parse_inst_t *)&cmd_ddp_del,
(cmdline_parse_inst_t *)&cmd_ddp_get_list,
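Both cmd_set_vxlan_parsed() and cmd_set_nvgre_parsed() above store the 24-bit VNI/TNI in network byte order by masking a 32-bit big-endian value and copying its three low-order bytes. A standalone sketch of the same conversion, assuming plain libc htonl() in place of rte_cpu_to_be_32() purely to keep the example self-contained:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* Sketch of the 24-bit VNI conversion used by cmd_set_vxlan_parsed(). */
static void
vni_to_be24(uint32_t vni, uint8_t out[3])
{
	union {
		uint32_t vxlan_id;
		uint8_t vni[4];
	} id;

	/* Keep only 24 bits, laid out in network byte order. */
	id.vxlan_id = htonl(vni & 0x00ffffff);
	/* Byte 0 is always zero after masking; bytes 1..3 hold the VNI. */
	memcpy(out, &id.vni[1], 3);
}

This is why the handlers copy from &id.vni[1] with a length of 3: the most significant byte of the masked big-endian value is guaranteed to be zero.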
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 9918d7fd..f9260600 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -194,6 +194,8 @@ enum index {
ACTION_QUEUE_INDEX,
ACTION_DROP,
ACTION_COUNT,
+ ACTION_COUNT_SHARED,
+ ACTION_COUNT_ID,
ACTION_RSS,
ACTION_RSS_FUNC,
ACTION_RSS_LEVEL,
@@ -237,6 +239,10 @@ enum index {
ACTION_OF_POP_MPLS_ETHERTYPE,
ACTION_OF_PUSH_MPLS,
ACTION_OF_PUSH_MPLS_ETHERTYPE,
+ ACTION_VXLAN_ENCAP,
+ ACTION_VXLAN_DECAP,
+ ACTION_NVGRE_ENCAP,
+ ACTION_NVGRE_DECAP,
};
/** Maximum size for pattern in struct rte_flow_item_raw. */
@@ -256,6 +262,39 @@ struct action_rss_data {
uint16_t queue[ACTION_RSS_QUEUE_NUM];
};
+/** Maximum number of items in struct rte_flow_action_vxlan_encap. */
+#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
+
+/** Storage for struct rte_flow_action_vxlan_encap including external data. */
+struct action_vxlan_encap_data {
+ struct rte_flow_action_vxlan_encap conf;
+ struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
+ struct rte_flow_item_eth item_eth;
+ struct rte_flow_item_vlan item_vlan;
+ union {
+ struct rte_flow_item_ipv4 item_ipv4;
+ struct rte_flow_item_ipv6 item_ipv6;
+ };
+ struct rte_flow_item_udp item_udp;
+ struct rte_flow_item_vxlan item_vxlan;
+};
+
+/** Maximum number of items in struct rte_flow_action_nvgre_encap. */
+#define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
+
+/** Storage for struct rte_flow_action_nvgre_encap including external data. */
+struct action_nvgre_encap_data {
+ struct rte_flow_action_nvgre_encap conf;
+ struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
+ struct rte_flow_item_eth item_eth;
+ struct rte_flow_item_vlan item_vlan;
+ union {
+ struct rte_flow_item_ipv4 item_ipv4;
+ struct rte_flow_item_ipv6 item_ipv6;
+ };
+ struct rte_flow_item_nvgre item_nvgre;
+};
+
/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
@@ -773,6 +812,10 @@ static const enum index next_action[] = {
ACTION_OF_SET_VLAN_PCP,
ACTION_OF_POP_MPLS,
ACTION_OF_PUSH_MPLS,
+ ACTION_VXLAN_ENCAP,
+ ACTION_VXLAN_DECAP,
+ ACTION_NVGRE_ENCAP,
+ ACTION_NVGRE_DECAP,
ZERO,
};
@@ -788,6 +831,13 @@ static const enum index action_queue[] = {
ZERO,
};
+static const enum index action_count[] = {
+ ACTION_COUNT_ID,
+ ACTION_COUNT_SHARED,
+ ACTION_NEXT,
+ ZERO,
+};
+
static const enum index action_rss[] = {
ACTION_RSS_FUNC,
ACTION_RSS_LEVEL,
@@ -896,6 +946,12 @@ static int parse_vc_action_rss_type(struct context *, const struct token *,
static int parse_vc_action_rss_queue(struct context *, const struct token *,
const char *, unsigned int, void *,
unsigned int);
+static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
static int parse_destroy(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -2022,10 +2078,26 @@ static const struct token token_list[] = {
[ACTION_COUNT] = {
.name = "count",
.help = "enable counters for this rule",
- .priv = PRIV_ACTION(COUNT, 0),
- .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .priv = PRIV_ACTION(COUNT,
+ sizeof(struct rte_flow_action_count)),
+ .next = NEXT(action_count),
.call = parse_vc,
},
+ [ACTION_COUNT_ID] = {
+ .name = "identifier",
+ .help = "counter identifier to use",
+ .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_COUNT_SHARED] = {
+ .name = "shared",
+ .help = "shared counter",
+ .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
+ shared, 1)),
+ .call = parse_vc_conf,
+ },
[ACTION_RSS] = {
.name = "rss",
.help = "spread packets among several queues",
@@ -2362,6 +2434,42 @@ static const struct token token_list[] = {
ethertype)),
.call = parse_vc_conf,
},
+ [ACTION_VXLAN_ENCAP] = {
+ .name = "vxlan_encap",
+ .help = "VXLAN encapsulation, uses configuration set by \"set"
+ " vxlan\"",
+ .priv = PRIV_ACTION(VXLAN_ENCAP,
+ sizeof(struct action_vxlan_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_vxlan_encap,
+ },
+ [ACTION_VXLAN_DECAP] = {
+ .name = "vxlan_decap",
+ .help = "Performs a decapsulation action by stripping all"
+ " headers of the VXLAN tunnel network overlay from the"
+ " matched flow.",
+ .priv = PRIV_ACTION(VXLAN_DECAP, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_NVGRE_ENCAP] = {
+ .name = "nvgre_encap",
+ .help = "NVGRE encapsulation, uses configuration set by \"set"
+ " nvgre\"",
+ .priv = PRIV_ACTION(NVGRE_ENCAP,
+ sizeof(struct action_nvgre_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_nvgre_encap,
+ },
+ [ACTION_NVGRE_DECAP] = {
+ .name = "nvgre_decap",
+ .help = "Performs a decapsulation action by stripping all"
+ " headers of the NVGRE tunnel network overlay from the"
+ " matched flow.",
+ .priv = PRIV_ACTION(NVGRE_DECAP, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
};
/** Remove and return last entry from argument stack. */
@@ -2926,6 +3034,197 @@ end:
return len;
}
+/** Parse VXLAN encap action. */
+static int
+parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_vxlan_encap_data *action_vxlan_encap_data;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Set up default configuration. */
+ action_vxlan_encap_data = ctx->object;
+ *action_vxlan_encap_data = (struct action_vxlan_encap_data){
+ .conf = (struct rte_flow_action_vxlan_encap){
+ .definition = action_vxlan_encap_data->items,
+ },
+ .items = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &action_vxlan_encap_data->item_eth,
+ .mask = &rte_flow_item_eth_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &action_vxlan_encap_data->item_vlan,
+ .mask = &rte_flow_item_vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &action_vxlan_encap_data->item_ipv4,
+ .mask = &rte_flow_item_ipv4_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &action_vxlan_encap_data->item_udp,
+ .mask = &rte_flow_item_udp_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ .spec = &action_vxlan_encap_data->item_vxlan,
+ .mask = &rte_flow_item_vxlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ .item_eth.type = 0,
+ .item_vlan = {
+ .tci = vxlan_encap_conf.vlan_tci,
+ .inner_type = 0,
+ },
+ .item_ipv4.hdr = {
+ .src_addr = vxlan_encap_conf.ipv4_src,
+ .dst_addr = vxlan_encap_conf.ipv4_dst,
+ },
+ .item_udp.hdr = {
+ .src_port = vxlan_encap_conf.udp_src,
+ .dst_port = vxlan_encap_conf.udp_dst,
+ },
+ .item_vxlan.flags = 0,
+ };
+ memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
+ vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
+ vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
+ if (!vxlan_encap_conf.select_ipv4) {
+ memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
+ &vxlan_encap_conf.ipv6_src,
+ sizeof(vxlan_encap_conf.ipv6_src));
+ memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
+ &vxlan_encap_conf.ipv6_dst,
+ sizeof(vxlan_encap_conf.ipv6_dst));
+ action_vxlan_encap_data->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &action_vxlan_encap_data->item_ipv6,
+ .mask = &rte_flow_item_ipv6_mask,
+ };
+ }
+ if (!vxlan_encap_conf.select_vlan)
+ action_vxlan_encap_data->items[1].type =
+ RTE_FLOW_ITEM_TYPE_VOID;
+ memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
+ RTE_DIM(vxlan_encap_conf.vni));
+ action->conf = &action_vxlan_encap_data->conf;
+ return ret;
+}
+
+/** Parse NVGRE encap action. */
+static int
+parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_nvgre_encap_data *action_nvgre_encap_data;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Set up default configuration. */
+ action_nvgre_encap_data = ctx->object;
+ *action_nvgre_encap_data = (struct action_nvgre_encap_data){
+ .conf = (struct rte_flow_action_nvgre_encap){
+ .definition = action_nvgre_encap_data->items,
+ },
+ .items = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &action_nvgre_encap_data->item_eth,
+ .mask = &rte_flow_item_eth_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &action_nvgre_encap_data->item_vlan,
+ .mask = &rte_flow_item_vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &action_nvgre_encap_data->item_ipv4,
+ .mask = &rte_flow_item_ipv4_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+ .spec = &action_nvgre_encap_data->item_nvgre,
+ .mask = &rte_flow_item_nvgre_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ .item_eth.type = 0,
+ .item_vlan = {
+ .tci = nvgre_encap_conf.vlan_tci,
+ .inner_type = 0,
+ },
+ .item_ipv4.hdr = {
+ .src_addr = nvgre_encap_conf.ipv4_src,
+ .dst_addr = nvgre_encap_conf.ipv4_dst,
+ },
+ .item_nvgre.flow_id = 0,
+ };
+ memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
+ nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
+ nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ if (!nvgre_encap_conf.select_ipv4) {
+ memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
+ &nvgre_encap_conf.ipv6_src,
+ sizeof(nvgre_encap_conf.ipv6_src));
+ memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
+ &nvgre_encap_conf.ipv6_dst,
+ sizeof(nvgre_encap_conf.ipv6_dst));
+ action_nvgre_encap_data->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &action_nvgre_encap_data->item_ipv6,
+ .mask = &rte_flow_item_ipv6_mask,
+ };
+ }
+ if (!nvgre_encap_conf.select_vlan)
+ action_nvgre_encap_data->items[1].type =
+ RTE_FLOW_ITEM_TYPE_VOID;
+ memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
+ RTE_DIM(nvgre_encap_conf.tni));
+ action->conf = &action_nvgre_encap_data->conf;
+ return ret;
+}
+
/** Parse tokens for destroy command. */
static int
parse_destroy(struct context *ctx, const struct token *token,
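The new VXLAN_ENCAP/NVGRE_ENCAP actions carry their outer headers as a flow-item list referenced through conf.definition, which parse_vc_action_vxlan_encap() above fills from the global vxlan_encap_conf. A minimal sketch of the resulting layout, with hard-coded spec objects used purely for illustration (the parser swaps item [2] to IPv6 and voids item [1] depending on the "set vxlan" configuration):

#include <rte_flow.h>

/* Sketch: the header stack a VXLAN_ENCAP action points at. */
static struct rte_flow_item_eth   enc_eth;
static struct rte_flow_item_vlan  enc_vlan;
static struct rte_flow_item_ipv4  enc_ipv4;
static struct rte_flow_item_udp   enc_udp;
static struct rte_flow_item_vxlan enc_vxlan;

static struct rte_flow_item enc_items[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &enc_eth,   .mask = &rte_flow_item_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,  .spec = &enc_vlan,  .mask = &rte_flow_item_vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &enc_ipv4,  .mask = &rte_flow_item_ipv4_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &enc_udp,   .mask = &rte_flow_item_udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan, .mask = &rte_flow_item_vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_action_vxlan_encap enc_conf = {
	.definition = enc_items,	/* consumed by the PMD when the rule is created */
};

The NVGRE variant uses the same shape with an NVGRE item in place of UDP/VXLAN, which is why its item array holds five entries instead of six.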
diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c
index c904e44f..631f1799 100644
--- a/app/test-pmd/cmdline_tm.c
+++ b/app/test-pmd/cmdline_tm.c
@@ -234,6 +234,7 @@ static void cmd_show_port_tm_cap_parsed(void *parsed_result,
return;
memset(&cap, 0, sizeof(struct rte_tm_capabilities));
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_capabilities_get(port_id, &cap, &error);
if (ret) {
print_err_msg(&error);
@@ -374,6 +375,7 @@ static void cmd_show_port_tm_level_cap_parsed(void *parsed_result,
return;
memset(&lcap, 0, sizeof(struct rte_tm_level_capabilities));
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_level_capabilities_get(port_id, level_id, &lcap, &error);
if (ret) {
print_err_msg(&error);
@@ -498,6 +500,7 @@ static void cmd_show_port_tm_node_cap_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Node id must be valid */
ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error);
if (ret != 0) {
@@ -615,6 +618,7 @@ static void cmd_show_port_tm_node_stats_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (!port_is_started(port_id)) {
printf(" Port %u not started (error)\n", port_id);
@@ -727,6 +731,7 @@ static void cmd_show_port_tm_node_type_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -832,6 +837,7 @@ static void cmd_add_port_tm_node_shaper_profile_parsed(void *parsed_result,
/* Private shaper profile params */
memset(&sp, 0, sizeof(struct rte_tm_shaper_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
sp.peak.rate = res->tb_rate;
sp.peak.size = res->tb_size;
sp.pkt_length_adjust = pkt_len_adjust;
@@ -919,6 +925,7 @@ static void cmd_del_port_tm_node_shaper_profile_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_shaper_profile_delete(port_id, shaper_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -1004,6 +1011,7 @@ static void cmd_add_port_tm_node_shared_shaper_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Command type: add */
if ((strcmp(res->cmd_type, "add") == 0) &&
(port_is_started(port_id))) {
@@ -1098,6 +1106,7 @@ static void cmd_del_port_tm_node_shared_shaper_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_shared_shaper_delete(port_id, shared_shaper_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -1254,6 +1263,7 @@ static void cmd_add_port_tm_node_wred_profile_parsed(void *parsed_result,
return;
memset(&wp, 0, sizeof(struct rte_tm_wred_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* WRED Params (Green Color)*/
color = RTE_TM_GREEN;
@@ -1369,6 +1379,7 @@ static void cmd_del_port_tm_node_wred_profile_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_wred_profile_delete(port_id, wred_profile_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -1455,6 +1466,7 @@ static void cmd_set_port_tm_node_shaper_profile_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (!port_is_started(port_id)) {
printf(" Port %u not started (error)\n", port_id);
@@ -1571,6 +1583,7 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
return;
memset(&np, 0, sizeof(struct rte_tm_node_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Node parameters */
if (res->parent_node_id < 0)
@@ -1599,10 +1612,12 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
np.shaper_profile_id = res->shaper_profile_id;
np.n_shared_shapers = n_shared_shapers;
- if (np.n_shared_shapers)
+ if (np.n_shared_shapers) {
np.shared_shaper_id = &shared_shaper_id[0];
- else
- np.shared_shaper_id = NULL;
+ } else {
+ free(shared_shaper_id);
+ shared_shaper_id = NULL;
+ }
np.nonleaf.n_sp_priorities = res->n_sp_priorities;
np.stats_mask = res->stats_mask;
@@ -1729,6 +1744,7 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
return;
memset(&np, 0, sizeof(struct rte_tm_node_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Node parameters */
if (res->parent_node_id < 0)
@@ -1758,10 +1774,12 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
np.n_shared_shapers = n_shared_shapers;
- if (np.n_shared_shapers)
+ if (np.n_shared_shapers) {
np.shared_shaper_id = &shared_shaper_id[0];
- else
- np.shared_shaper_id = NULL;
+ } else {
+ free(shared_shaper_id);
+ shared_shaper_id = NULL;
+ }
np.leaf.cman = res->cman_mode;
np.leaf.wred.wred_profile_id = res->wred_profile_id;
@@ -1844,6 +1862,7 @@ static void cmd_del_port_tm_node_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (port_is_started(port_id)) {
printf(" Port %u not stopped (error)\n", port_id);
@@ -1933,6 +1952,7 @@ static void cmd_set_port_tm_node_parent_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (!port_is_started(port_id)) {
printf(" Port %u not started (error)\n", port_id);
@@ -2008,6 +2028,7 @@ static void cmd_suspend_port_tm_node_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_node_suspend(port_id, node_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -2072,6 +2093,7 @@ static void cmd_resume_port_tm_node_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_node_resume(port_id, node_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -2143,6 +2165,7 @@ static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result,
else
clean_on_fail = 0;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_hierarchy_commit(port_id, clean_on_fail, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -2153,7 +2176,7 @@ static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_port_tm_hierarchy_commit = {
.f = cmd_port_tm_hierarchy_commit_parsed,
.data = NULL,
- .help_str = "Set port tm node shaper profile",
+ .help_str = "Commit port tm hierarchy",
.tokens = {
(void *)&cmd_port_tm_hierarchy_commit_port,
(void *)&cmd_port_tm_hierarchy_commit_tm,
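The cmdline_tm.c hunks all add the same initialization: the rte_tm_error struct is zeroed before each rte_tm_* call, so a driver that fails without filling it cannot cause stale or uninitialized type/message fields to be printed. A minimal sketch of the pattern; show_tm_caps() is a hypothetical wrapper, not part of the patch:

#include <stdio.h>
#include <string.h>
#include <rte_tm.h>

/* Sketch: zero the error struct before every rte_tm_* call. */
static void
show_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;
	int ret;

	memset(&cap, 0, sizeof(cap));
	memset(&error, 0, sizeof(error));
	ret = rte_tm_capabilities_get(port_id, &cap, &error);
	if (ret != 0)
		printf("capabilities get error(%d) - %s\n",
			error.type,
			error.message ? error.message : "(no message)");
}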
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 97020fb3..14ccd686 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2177,15 +2177,11 @@ rss_fwd_config_setup(void)
fs->tx_queue = rxq;
fs->peer_addr = fs->tx_port;
fs->retry_enabled = retry_enabled;
- rxq = (queueid_t) (rxq + 1);
- if (rxq < nb_q)
- continue;
- /*
- * rxq == nb_q
- * Restart from RX queue 0 on next RX port
- */
- rxq = 0;
rxp++;
+ if (rxp < nb_fwd_ports)
+ continue;
+ rxp = 0;
+ rxq++;
}
}
@@ -2332,6 +2328,55 @@ icmp_echo_config_setup(void)
}
}
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+static void
+softnic_fwd_config_setup(void)
+{
+ struct rte_port *port;
+ portid_t pid, softnic_portid;
+ queueid_t i;
+ uint8_t softnic_enable = 0;
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ port = &ports[pid];
+ const char *driver = port->dev_info.driver_name;
+
+ if (strcmp(driver, "net_softnic") == 0) {
+ softnic_portid = pid;
+ softnic_enable = 1;
+ break;
+ }
+ }
+
+ if (softnic_enable == 0) {
+ printf("Softnic mode not configured(%s)!\n", __func__);
+ return;
+ }
+
+ cur_fwd_config.nb_fwd_ports = 1;
+ cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
+
+ /* Re-initialize forwarding streams */
+ init_fwd_streams();
+
+ /*
+ * In the softnic forwarding test, the number of forwarding cores
+ * is set to one and remaining are used for softnic packet processing.
+ */
+ cur_fwd_config.nb_fwd_lcores = 1;
+ setup_fwd_config_of_each_lcore(&cur_fwd_config);
+
+ for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
+ fwd_streams[i]->rx_port = softnic_portid;
+ fwd_streams[i]->rx_queue = i;
+ fwd_streams[i]->tx_port = softnic_portid;
+ fwd_streams[i]->tx_queue = i;
+ fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
+ fwd_streams[i]->retry_enabled = retry_enabled;
+ }
+}
+#endif
+
void
fwd_config_setup(void)
{
@@ -2340,6 +2385,14 @@ fwd_config_setup(void)
icmp_echo_config_setup();
return;
}
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
+ softnic_fwd_config_setup();
+ return;
+ }
+#endif
+
if ((nb_rxq > 1) && (nb_txq > 1)){
if (dcb_config)
dcb_fwd_config_setup();
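The rss_fwd_config_setup() change above flips the stream layout from queue-major to port-major: consecutive forwarding streams now land on the same queue index of successive ports before the queue index advances. A small sketch of the resulting walk; print_stream_layout() is an illustrative helper, not part of the patch:

#include <stdio.h>

/* Sketch: order in which (rx port, rx queue) pairs are assigned to
 * forwarding streams after this patch. */
static void
print_stream_layout(unsigned int nb_fwd_ports, unsigned int nb_q)
{
	unsigned int rxp = 0, rxq = 0, sm_id;

	for (sm_id = 0; sm_id < nb_fwd_ports * nb_q; sm_id++) {
		printf("stream %u -> port %u, queue %u\n", sm_id, rxp, rxq);
		rxp++;
		if (rxp < nb_fwd_ports)
			continue;
		rxp = 0;	/* wrap to the first port ... */
		rxq++;		/* ... and move to the next queue */
	}
}

With two ports and two queues this yields (0,0), (1,0), (0,1), (1,1), whereas the old loop produced (0,0), (0,1), (1,0), (1,1).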
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 0bb88cf7..49482926 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -411,6 +411,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
info->ethertype);
}
}
+ if (info->gso_enable)
+ ol_flags |= PKT_TX_UDP_SEG;
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
tcp_hdr->cksum = 0;
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index a51514b0..a0b3be07 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -4,6 +4,7 @@
# override default name to drop the hyphen
name = 'testpmd'
allow_experimental_apis = true
+cflags += '-Wno-deprecated-declarations'
sources = files('cmdline.c',
'cmdline_flow.c',
'cmdline_mtr.c',
@@ -25,6 +26,9 @@ deps = ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci']
if dpdk_conf.has('RTE_LIBRTE_PDUMP')
deps += 'pdump'
endif
+if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD')
+ deps += 'pmd_bnxt'
+endif
if dpdk_conf.has('RTE_LIBRTE_I40E_PMD')
deps += 'pmd_i40e'
endif
@@ -32,7 +36,7 @@ if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD')
deps += 'pmd_ixgbe'
endif
if dpdk_conf.has('RTE_LIBRTE_SOFTNIC_PMD')
- sources += files('tm.c')
+ sources += files('softnicfwd.c')
deps += 'pmd_softnic'
endif
if dpdk_conf.has('RTE_LIBRTE_DPAA_PMD')
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 75807623..962fad78 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -878,8 +878,10 @@ launch_args_parse(int argc, char** argv)
" must be >= 0\n", n);
}
#endif
- if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
+ if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip")) {
rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ }
if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
diff --git a/app/test-pmd/tm.c b/app/test-pmd/softnicfwd.c
index 7231552a..7ff62280 100644
--- a/app/test-pmd/tm.c
+++ b/app/test-pmd/softnicfwd.c
@@ -6,6 +6,7 @@
#include <rte_cycles.h>
#include <rte_mbuf.h>
+#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_meter.h>
@@ -71,170 +72,17 @@ struct tm_hierarchy {
uint32_t n_shapers;
};
-#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr) \
-({ \
- uint64_t slab = *((uint64_t *) &byte_array[slab_pos]); \
- uint64_t val = \
- (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \
- val; \
-})
-
-#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, \
- traffic_class, queue, color) \
- ((((uint64_t) (queue)) & 0x3) | \
- ((((uint64_t) (traffic_class)) & 0x3) << 2) | \
- ((((uint64_t) (color)) & 0x3) << 4) | \
- ((((uint64_t) (subport)) & 0xFFFF) << 16) | \
- ((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
-
-
-static void
-pkt_metadata_set(struct rte_port *p, struct rte_mbuf **pkts,
- uint32_t n_pkts)
-{
- struct softnic_port_tm *tm = &p->softport.tm;
- uint32_t i;
-
- for (i = 0; i < (n_pkts & (~0x3)); i += 4) {
- struct rte_mbuf *pkt0 = pkts[i];
- struct rte_mbuf *pkt1 = pkts[i + 1];
- struct rte_mbuf *pkt2 = pkts[i + 2];
- struct rte_mbuf *pkt3 = pkts[i + 3];
-
- uint8_t *pkt0_data = rte_pktmbuf_mtod(pkt0, uint8_t *);
- uint8_t *pkt1_data = rte_pktmbuf_mtod(pkt1, uint8_t *);
- uint8_t *pkt2_data = rte_pktmbuf_mtod(pkt2, uint8_t *);
- uint8_t *pkt3_data = rte_pktmbuf_mtod(pkt3, uint8_t *);
-
- uint64_t pkt0_subport = BITFIELD(pkt0_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt0_pipe = BITFIELD(pkt0_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt0_dscp = BITFIELD(pkt0_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt0_tc = tm->tm_tc_table[pkt0_dscp & 0x3F] >> 2;
- uint32_t pkt0_tc_q = tm->tm_tc_table[pkt0_dscp & 0x3F] & 0x3;
- uint64_t pkt1_subport = BITFIELD(pkt1_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt1_pipe = BITFIELD(pkt1_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt1_dscp = BITFIELD(pkt1_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt1_tc = tm->tm_tc_table[pkt1_dscp & 0x3F] >> 2;
- uint32_t pkt1_tc_q = tm->tm_tc_table[pkt1_dscp & 0x3F] & 0x3;
-
- uint64_t pkt2_subport = BITFIELD(pkt2_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt2_pipe = BITFIELD(pkt2_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt2_dscp = BITFIELD(pkt2_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt2_tc = tm->tm_tc_table[pkt2_dscp & 0x3F] >> 2;
- uint32_t pkt2_tc_q = tm->tm_tc_table[pkt2_dscp & 0x3F] & 0x3;
-
- uint64_t pkt3_subport = BITFIELD(pkt3_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt3_pipe = BITFIELD(pkt3_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt3_dscp = BITFIELD(pkt3_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt3_tc = tm->tm_tc_table[pkt3_dscp & 0x3F] >> 2;
- uint32_t pkt3_tc_q = tm->tm_tc_table[pkt3_dscp & 0x3F] & 0x3;
-
- uint64_t pkt0_sched = RTE_SCHED_PORT_HIERARCHY(pkt0_subport,
- pkt0_pipe,
- pkt0_tc,
- pkt0_tc_q,
- 0);
- uint64_t pkt1_sched = RTE_SCHED_PORT_HIERARCHY(pkt1_subport,
- pkt1_pipe,
- pkt1_tc,
- pkt1_tc_q,
- 0);
- uint64_t pkt2_sched = RTE_SCHED_PORT_HIERARCHY(pkt2_subport,
- pkt2_pipe,
- pkt2_tc,
- pkt2_tc_q,
- 0);
- uint64_t pkt3_sched = RTE_SCHED_PORT_HIERARCHY(pkt3_subport,
- pkt3_pipe,
- pkt3_tc,
- pkt3_tc_q,
- 0);
-
- pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
- pkt0->hash.sched.hi = pkt0_sched >> 32;
- pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
- pkt1->hash.sched.hi = pkt1_sched >> 32;
- pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
- pkt2->hash.sched.hi = pkt2_sched >> 32;
- pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
- pkt3->hash.sched.hi = pkt3_sched >> 32;
- }
-
- for (; i < n_pkts; i++) {
- struct rte_mbuf *pkt = pkts[i];
-
- uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
-
- uint64_t pkt_subport = BITFIELD(pkt_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt_pipe = BITFIELD(pkt_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt_dscp = BITFIELD(pkt_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt_tc = tm->tm_tc_table[pkt_dscp & 0x3F] >> 2;
- uint32_t pkt_tc_q = tm->tm_tc_table[pkt_dscp & 0x3F] & 0x3;
-
- uint64_t pkt_sched = RTE_SCHED_PORT_HIERARCHY(pkt_subport,
- pkt_pipe,
- pkt_tc,
- pkt_tc_q,
- 0);
-
- pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
- pkt->hash.sched.hi = pkt_sched >> 32;
- }
-}
+static struct fwd_lcore *softnic_fwd_lcore;
+static uint16_t softnic_port_id;
+struct fwd_engine softnic_fwd_engine;
/*
- * Soft port packet forward
+ * Softnic packet forward
*/
static void
-softport_packet_fwd(struct fwd_stream *fs)
+softnic_fwd(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_port *rte_tx_port = &ports[fs->tx_port];
uint16_t nb_rx;
uint16_t nb_tx;
uint32_t retry;
@@ -258,14 +106,6 @@ softport_packet_fwd(struct fwd_stream *fs)
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
- if (rte_tx_port->softnic_enable) {
- /* Set packet metadata if tm flag enabled */
- if (rte_tx_port->softport.tm_flag)
- pkt_metadata_set(rte_tx_port, pkts_burst, nb_rx);
-
- /* Softport run */
- rte_pmd_softnic_run(fs->tx_port);
- }
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
pkts_burst, nb_rx);
@@ -298,7 +138,34 @@ softport_packet_fwd(struct fwd_stream *fs)
}
static void
-set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h)
+softnic_fwd_run(struct fwd_stream *fs)
+{
+ rte_pmd_softnic_run(softnic_port_id);
+ softnic_fwd(fs);
+}
+
+/**
+ * Softnic init
+ */
+static int
+softnic_begin(void *arg __rte_unused)
+{
+ for (;;) {
+ if (!softnic_fwd_lcore->stopped)
+ break;
+ }
+
+ do {
+ /* Run softnic */
+ rte_pmd_softnic_run(softnic_port_id);
+ } while (!softnic_fwd_lcore->stopped);
+
+ return 0;
+}
+
+static void
+set_tm_hiearchy_nodes_shaper_rate(portid_t port_id,
+ struct tm_hierarchy *h)
{
struct rte_eth_link link_params;
uint64_t tm_port_rate;
@@ -306,10 +173,7 @@ set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h)
memset(&link_params, 0, sizeof(link_params));
rte_eth_link_get(port_id, &link_params);
- tm_port_rate = (uint64_t)link_params.link_speed * BYTES_IN_MBPS;
-
- if (tm_port_rate > UINT32_MAX)
- tm_port_rate = UINT32_MAX;
+ tm_port_rate = (uint64_t)ETH_SPEED_NUM_10G * BYTES_IN_MBPS;
/* Set tm hierarchy shapers rate */
h->root_node_shaper_rate = tm_port_rate;
@@ -374,7 +238,8 @@ softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h,
}
static int
-softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h,
+softport_tm_subport_node_add(portid_t port_id,
+ struct tm_hierarchy *h,
struct rte_tm_error *error)
{
uint32_t subport_parent_node_id, subport_node_id = 0;
@@ -442,7 +307,8 @@ softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h,
}
static int
-softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h,
+softport_tm_pipe_node_add(portid_t port_id,
+ struct tm_hierarchy *h,
struct rte_tm_error *error)
{
uint32_t pipe_parent_node_id;
@@ -511,7 +377,8 @@ softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h,
}
static int
-softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h,
+softport_tm_tc_node_add(portid_t port_id,
+ struct tm_hierarchy *h,
struct rte_tm_error *error)
{
uint32_t tc_parent_node_id;
@@ -674,63 +541,9 @@ softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h,
return 0;
}
-/*
- * TM Packet Field Setup
- */
-static void
-softport_tm_pktfield_setup(portid_t port_id)
-{
- struct rte_port *p = &ports[port_id];
- uint64_t pktfield0_mask = 0;
- uint64_t pktfield1_mask = 0x0000000FFF000000LLU;
- uint64_t pktfield2_mask = 0x00000000000000FCLLU;
-
- p->softport.tm = (struct softnic_port_tm) {
- .n_subports_per_port = SUBPORT_NODES_PER_PORT,
- .n_pipes_per_subport = PIPE_NODES_PER_SUBPORT,
-
- /* Packet field to identify subport
- *
- * Default configuration assumes only one subport, thus
- * the subport ID is hardcoded to 0
- */
- .tm_pktfield0_slabpos = 0,
- .tm_pktfield0_slabmask = pktfield0_mask,
- .tm_pktfield0_slabshr =
- __builtin_ctzll(pktfield0_mask),
-
- /* Packet field to identify pipe.
- *
- * Default value assumes Ethernet/IPv4/UDP packets,
- * UDP payload bits 12 .. 23
- */
- .tm_pktfield1_slabpos = 40,
- .tm_pktfield1_slabmask = pktfield1_mask,
- .tm_pktfield1_slabshr =
- __builtin_ctzll(pktfield1_mask),
-
- /* Packet field used as index into TC translation table
- * to identify the traffic class and queue.
- *
- * Default value assumes Ethernet/IPv4 packets, IPv4
- * DSCP field
- */
- .tm_pktfield2_slabpos = 8,
- .tm_pktfield2_slabmask = pktfield2_mask,
- .tm_pktfield2_slabshr =
- __builtin_ctzll(pktfield2_mask),
-
- .tm_tc_table = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- }, /**< TC translation table */
- };
-}
-
static int
-softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error)
+softport_tm_hierarchy_specify(portid_t port_id,
+ struct rte_tm_error *error)
{
struct tm_hierarchy h;
@@ -766,75 +579,96 @@ softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error)
if (status)
return status;
- /* TM packet fields setup */
- softport_tm_pktfield_setup(port_id);
-
return 0;
}
/*
- * Soft port Init
+ * Softnic TM default configuration
*/
static void
-softport_tm_begin(portid_t pi)
+softnic_tm_default_config(portid_t pi)
{
struct rte_port *port = &ports[pi];
+ struct rte_tm_error error;
+ int status;
- /* Soft port TM flag */
- if (port->softport.tm_flag == 1) {
- printf("\n\n TM feature available on port %u\n", pi);
-
- /* Soft port TM hierarchy configuration */
- if ((port->softport.tm.hierarchy_config == 0) &&
- (port->softport.tm.default_hierarchy_enable == 1)) {
- struct rte_tm_error error;
- int status;
-
- /* Stop port */
- rte_eth_dev_stop(pi);
-
- /* TM hierarchy specification */
- status = softport_tm_hierarchy_specify(pi, &error);
- if (status) {
- printf(" TM Hierarchy built error(%d) - %s\n",
- error.type, error.message);
- return;
- }
- printf("\n TM Hierarchy Specified!\n\v");
-
- /* TM hierarchy commit */
- status = rte_tm_hierarchy_commit(pi, 0, &error);
- if (status) {
- printf(" Hierarchy commit error(%d) - %s\n",
- error.type, error.message);
- return;
- }
- printf(" Hierarchy Committed (port %u)!", pi);
- port->softport.tm.hierarchy_config = 1;
-
- /* Start port */
- status = rte_eth_dev_start(pi);
- if (status) {
- printf("\n Port %u start error!\n", pi);
- return;
- }
- printf("\n Port %u started!\n", pi);
- return;
- }
+ /* Stop port */
+ rte_eth_dev_stop(pi);
+
+ /* TM hierarchy specification */
+ status = softport_tm_hierarchy_specify(pi, &error);
+ if (status) {
+ printf(" TM Hierarchy built error(%d) - %s\n",
+ error.type, error.message);
+ return;
+ }
+ printf("\n TM Hierarchy Specified!\n");
+
+ /* TM hierarchy commit */
+ status = rte_tm_hierarchy_commit(pi, 0, &error);
+ if (status) {
+ printf(" Hierarchy commit error(%d) - %s\n",
+ error.type, error.message);
+ return;
+ }
+ printf(" Hierarchy Committed (port %u)!\n", pi);
+
+ /* Start port */
+ status = rte_eth_dev_start(pi);
+ if (status) {
+ printf("\n Port %u start error!\n", pi);
+ return;
}
- printf("\n TM feature not available on port %u", pi);
+
+ /* Reset the default hierarchy flag */
+ port->softport.default_tm_hierarchy_enable = 0;
}
-struct fwd_engine softnic_tm_engine = {
- .fwd_mode_name = "tm",
- .port_fwd_begin = softport_tm_begin,
- .port_fwd_end = NULL,
- .packet_fwd = softport_packet_fwd,
-};
+/*
+ * Softnic forwarding init
+ */
+static void
+softnic_fwd_begin(portid_t pi)
+{
+ struct rte_port *port = &ports[pi];
+ uint32_t lcore, fwd_core_present = 0, softnic_run_launch = 0;
+ int status;
+
+ softnic_fwd_lcore = port->softport.fwd_lcore_arg[0];
+ softnic_port_id = pi;
+
+ /* Launch softnic_run function on lcores */
+ for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
+ if (!rte_lcore_is_enabled(lcore))
+ continue;
+
+ if (lcore == rte_get_master_lcore())
+ continue;
+
+ if (fwd_core_present == 0) {
+ fwd_core_present++;
+ continue;
+ }
+
+ status = rte_eal_remote_launch(softnic_begin, NULL, lcore);
+ if (status)
+ printf("softnic launch on lcore %u failed (%d)\n",
+ lcore, status);
+
+ softnic_run_launch = 1;
+ }
+
+ if (!softnic_run_launch)
+ softnic_fwd_engine.packet_fwd = softnic_fwd_run;
+
+ /* Softnic TM default configuration */
+ if (port->softport.default_tm_hierarchy_enable == 1)
+ softnic_tm_default_config(pi);
+}
-struct fwd_engine softnic_tm_bypass_engine = {
- .fwd_mode_name = "tm-bypass",
- .port_fwd_begin = NULL,
+struct fwd_engine softnic_fwd_engine = {
+ .fwd_mode_name = "softnic",
+ .port_fwd_begin = softnic_fwd_begin,
.port_fwd_end = NULL,
- .packet_fwd = softport_packet_fwd,
+ .packet_fwd = softnic_fwd,
};
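
For orientation, testpmd keeps its forwarding engines in the NULL-terminated fwd_engines[] table and selects one by its fwd_mode_name; below is a minimal sketch of that lookup, assuming only the fwd_engine declarations from testpmd.h (the helper name is hypothetical):

#include <string.h>
#include "testpmd.h"

/* Hypothetical helper: map a mode name such as "softnic" to its engine.
 * Returns NULL when the mode is unknown (e.g. softnic support not built in). */
static struct fwd_engine *
lookup_fwd_engine(const char *name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++)
		if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
			return fwd_engines[i];
	return NULL;
}
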
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 35cf2667..ee48db2a 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -127,6 +127,8 @@ portid_t nb_ports; /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */
+portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
+
/*
* Test Forwarding Configuration.
* nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
@@ -155,9 +157,8 @@ struct fwd_engine * fwd_engines[] = {
&tx_only_engine,
&csum_fwd_engine,
&icmp_echo_engine,
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
- &softnic_tm_engine,
- &softnic_tm_bypass_engine,
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ &softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
@@ -334,7 +335,6 @@ lcoreid_t latencystats_lcore_id = -1;
struct rte_eth_rxmode rx_mode = {
.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
};
struct rte_eth_txmode tx_mode = {
@@ -346,7 +346,7 @@ struct rte_fdir_conf fdir_conf = {
.pballoc = RTE_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.mask = {
- .vlan_tci_mask = 0x0,
+ .vlan_tci_mask = 0xFFEF,
.ipv4_mask = {
.src_ip = 0xFFFFFFFF,
.dst_ip = 0xFFFFFFFF,
@@ -393,6 +393,38 @@ uint8_t bitrate_enabled;
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
+struct vxlan_encap_conf vxlan_encap_conf = {
+ .select_ipv4 = 1,
+ .select_vlan = 0,
+ .vni = "\x00\x00\x00",
+ .udp_src = 0,
+ .udp_dst = RTE_BE16(4789),
+ .ipv4_src = IPv4(127, 0, 0, 1),
+ .ipv4_dst = IPv4(255, 255, 255, 255),
+ .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x11\x11",
+ .vlan_tci = 0,
+ .eth_src = "\x00\x00\x00\x00\x00\x00",
+ .eth_dst = "\xff\xff\xff\xff\xff\xff",
+};
+
+struct nvgre_encap_conf nvgre_encap_conf = {
+ .select_ipv4 = 1,
+ .select_vlan = 0,
+ .tni = "\x00\x00\x00",
+ .ipv4_src = IPv4(127, 0, 0, 1),
+ .ipv4_dst = IPv4(255, 255, 255, 255),
+ .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x11\x11",
+ .vlan_tci = 0,
+ .eth_src = "\x00\x00\x00\x00\x00\x00",
+ .eth_dst = "\xff\xff\xff\xff\xff\xff",
+};
+
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
struct rte_port *port);
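
These encapsulation defaults are plain globals that are read when flows are created, so they can also be rewritten programmatically; a hedged sketch, assuming the field layout declared in testpmd.h further below (the helper name is hypothetical), of setting a non-default VNI and outer UDP source port:

#include <rte_byteorder.h>
#include "testpmd.h"

/* Hypothetical helper: the 24-bit VNI is stored as a 3-byte big-endian
 * array; L4 ports are kept in network byte order. */
static void
example_set_vxlan_encap(uint32_t vni, uint16_t udp_src)
{
	vxlan_encap_conf.vni[0] = (vni >> 16) & 0xff;
	vxlan_encap_conf.vni[1] = (vni >> 8) & 0xff;
	vxlan_encap_conf.vni[2] = vni & 0xff;
	vxlan_encap_conf.udp_src = rte_cpu_to_be_16(udp_src);
}
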
@@ -777,7 +809,7 @@ init_config(void)
init_port_config();
gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO;
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
@@ -816,6 +848,19 @@ init_config(void)
"rte_gro_ctx_create() failed\n");
}
}
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
+ RTE_ETH_FOREACH_DEV(pid) {
+ port = &ports[pid];
+ const char *driver = port->dev_info.driver_name;
+
+ if (strcmp(driver, "net_softnic") == 0)
+ port->softport.fwd_lcore_arg = fwd_lcores;
+ }
+ }
+#endif
+
}
@@ -1148,8 +1193,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
uint64_t tics_per_1sec;
uint64_t tics_datum;
uint64_t tics_current;
- uint16_t idx_port;
+ uint16_t i, cnt_ports;
+ cnt_ports = nb_ports;
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
@@ -1164,9 +1210,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_current = rte_rdtsc();
if (tics_current - tics_datum >= tics_per_1sec) {
/* Periodic bitrate calculation */
- RTE_ETH_FOREACH_DEV(idx_port)
+ for (i = 0; i < cnt_ports; i++)
rte_stats_bitrate_calc(bitrate_data,
- idx_port);
+ ports_ids[i]);
tics_datum = tics_current;
}
}
@@ -1645,8 +1691,6 @@ start_port(portid_t pid)
port->need_reconfig_queues = 0;
/* setup tx queues */
for (qi = 0; qi < nb_txq; qi++) {
- port->tx_conf[qi].txq_flags =
- ETH_TXQ_FLAGS_IGNORE;
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
@@ -1971,6 +2015,7 @@ attach_port(char *identifier)
reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
+ ports_ids[nb_ports] = pi;
nb_ports = rte_eth_dev_count_avail();
ports[pi].port_status = RTE_PORT_STOPPED;
@@ -1985,6 +2030,7 @@ void
detach_port(portid_t port_id)
{
char name[RTE_ETH_NAME_MAX_LEN];
+ uint16_t i;
printf("Detaching a port...\n");
@@ -2001,6 +2047,13 @@ detach_port(portid_t port_id)
return;
}
+ for (i = 0; i < nb_ports; i++) {
+ if (ports_ids[i] == port_id) {
+ ports_ids[i] = ports_ids[nb_ports-1];
+ ports_ids[nb_ports-1] = 0;
+ break;
+ }
+ }
nb_ports = rte_eth_dev_count_avail();
update_fwd_ports(RTE_MAX_ETHPORTS);
@@ -2355,16 +2408,15 @@ init_port_config(void)
{
portid_t pid;
struct rte_port *port;
- struct rte_eth_dev_info dev_info;
RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
port->dev_conf.fdir_conf = fdir_conf;
+ rte_eth_dev_info_get(pid, &port->dev_info);
if (nb_rxq > 1) {
- rte_eth_dev_info_get(pid, &dev_info);
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
- rss_hf & dev_info.flow_type_rss_offloads;
+ rss_hf & port->dev_info.flow_type_rss_offloads;
} else {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
@@ -2394,17 +2446,6 @@ init_port_config(void)
(rte_eth_devices[pid].data->dev_flags &
RTE_ETH_DEV_INTR_RMV))
port->dev_conf.intr_conf.rmv = 1;
-
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
- /* Detect softnic port */
- if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
- port->softnic_enable = 1;
- memset(&port->softport, 0, sizeof(struct softnic_port));
-
- if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
- port->softport.tm_flag = 1;
- }
-#endif
}
}
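
Caching dev_info inside struct rte_port also makes later capability checks cheap; a hedged sketch (helper name hypothetical, reusing the cached port->dev_info as above) of trimming a requested RSS hash-type mask to what the device advertises:

#include <inttypes.h>
#include <stdio.h>
#include "testpmd.h"

/* Hypothetical helper: drop RSS hash types the device does not support. */
static uint64_t
example_usable_rss(portid_t pid, uint64_t requested)
{
	struct rte_port *port = &ports[pid];
	uint64_t usable = requested & port->dev_info.flow_type_rss_offloads;

	if (usable != requested)
		printf("port %u: unsupported RSS types 0x%" PRIx64 " dropped\n",
			pid, requested & ~usable);
	return usable;
}
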
@@ -2443,12 +2484,14 @@ const uint16_t vlan_tags[] = {
};
static int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
enum dcb_mode_enable dcb_mode,
enum rte_eth_nb_tcs num_tcs,
uint8_t pfc_en)
{
uint8_t i;
+ int32_t rc;
+ struct rte_eth_rss_conf rss_conf;
/*
* Builds up the correct configuration for dcb+vt based on the vlan tags array
@@ -2488,6 +2531,10 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
struct rte_eth_dcb_tx_conf *tx_conf =
&eth_conf->tx_adv_conf.dcb_tx_conf;
+ rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
+ if (rc != 0)
+ return rc;
+
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
@@ -2495,8 +2542,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
}
+
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
- eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
+ eth_conf->rx_adv_conf.rss_conf = rss_conf;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
}
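
A hedged note on rte_eth_dev_rss_hash_conf_get(): drivers typically copy the hash key only when rss_conf.rss_key points at a caller-provided buffer of rss_key_len bytes, so a query that wants both the hash-field bitmap and the key might look like this sketch (function name hypothetical):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: read back the RSS hash-field bitmap and, if possible, the key. */
static void
example_dump_rss(uint16_t port_id)
{
	uint8_t key[64];	/* assumed large enough for the device key */
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
	};

	if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) == 0)
		printf("port %u RSS hash fields: 0x%" PRIx64 "\n",
			port_id, conf.rss_hf);
}
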
@@ -2530,7 +2578,7 @@ init_port_dcb_config(portid_t pid,
port_conf.txmode = rte_port->dev_conf.txmode;
/*set configuration of DCB in vt mode and DCB in non-vt mode*/
- retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
+ retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
if (retval < 0)
return retval;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -2653,6 +2701,7 @@ main(int argc, char** argv)
{
int diag;
portid_t port_id;
+ uint16_t count;
int ret;
signal(SIGINT, signal_handler);
@@ -2672,7 +2721,12 @@ main(int argc, char** argv)
rte_pdump_init(NULL);
#endif
- nb_ports = (portid_t) rte_eth_dev_count_avail();
+ count = 0;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ ports_ids[count] = port_id;
+ count++;
+ }
+ nb_ports = (portid_t) count;
if (nb_ports == 0)
TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
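
For completeness, a hedged sketch (helper name hypothetical) of rebuilding the same id snapshot with the documented iteration macro, e.g. after ports are attached or detached:

#include <rte_ethdev.h>
#include "testpmd.h"

extern portid_t ports_ids[RTE_MAX_ETHPORTS];	/* defined in testpmd.c above */

/* Hypothetical helper: refresh ports_ids[] from the currently valid
 * ethdev ports and return how many were found. */
static uint16_t
example_refresh_port_ids(void)
{
	uint16_t count = 0;
	portid_t pid;

	RTE_ETH_FOREACH_DEV(pid)
		ports_ids[count++] = pid;
	return count;
}
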
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index f51cd9dd..a1f66147 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -57,10 +57,10 @@ typedef uint16_t streamid_t;
#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
-#define TM_MODE 1
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+#define SOFTNIC 1
#else
-#define TM_MODE 0
+#define SOFTNIC 0
#endif
enum {
@@ -135,35 +135,13 @@ struct port_flow {
uint8_t data[]; /**< Storage for pattern/actions. */
};
-#ifdef TM_MODE
-/**
- * Soft port tm related parameters
- */
-struct softnic_port_tm {
- uint32_t default_hierarchy_enable; /**< def hierarchy enable flag */
- uint32_t hierarchy_config; /**< set to 1 if hierarchy configured */
-
- uint32_t n_subports_per_port; /**< Num of subport nodes per port */
- uint32_t n_pipes_per_subport; /**< Num of pipe nodes per subport */
-
- uint64_t tm_pktfield0_slabpos; /**< Pkt field position for subport */
- uint64_t tm_pktfield0_slabmask; /**< Pkt field mask for the subport */
- uint64_t tm_pktfield0_slabshr;
- uint64_t tm_pktfield1_slabpos; /**< Pkt field position for the pipe */
- uint64_t tm_pktfield1_slabmask; /**< Pkt field mask for the pipe */
- uint64_t tm_pktfield1_slabshr;
- uint64_t tm_pktfield2_slabpos; /**< Pkt field position table index */
- uint64_t tm_pktfield2_slabmask; /**< Pkt field mask for tc table idx */
- uint64_t tm_pktfield2_slabshr;
- uint64_t tm_tc_table[64]; /**< TC translation table */
-};
-
+#ifdef SOFTNIC
/**
 * The data structure associated with a softnic port
*/
struct softnic_port {
- unsigned int tm_flag; /**< set to 1 if tm feature is enabled */
- struct softnic_port_tm tm; /**< softnic port tm parameters */
+ uint32_t default_tm_hierarchy_enable; /**< default tm hierarchy */
+ struct fwd_lcore **fwd_lcore_arg; /**< softnic fwd core parameters */
};
#endif
@@ -202,9 +180,8 @@ struct rte_port {
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
uint8_t slave_flag; /**< bonding slave port */
struct port_flow *flow_list; /**< Associated flows. */
-#ifdef TM_MODE
- unsigned int softnic_enable; /**< softnic flag */
- struct softnic_port softport; /**< softnic port params */
+#ifdef SOFTNIC
+ struct softnic_port softport; /**< softnic params */
#endif
};
@@ -266,9 +243,8 @@ extern struct fwd_engine rx_only_engine;
extern struct fwd_engine tx_only_engine;
extern struct fwd_engine csum_fwd_engine;
extern struct fwd_engine icmp_echo_engine;
-#ifdef TM_MODE
-extern struct fwd_engine softnic_tm_engine;
-extern struct fwd_engine softnic_tm_bypass_engine;
+#ifdef SOFTNIC
+extern struct fwd_engine softnic_fwd_engine;
#endif
#ifdef RTE_LIBRTE_IEEE1588
extern struct fwd_engine ieee1588_fwd_engine;
@@ -479,6 +455,38 @@ struct gso_status {
extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
extern uint16_t gso_max_segment_size;
+/* VXLAN encap/decap parameters. */
+struct vxlan_encap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+ uint8_t vni[3];
+ rte_be16_t udp_src;
+ rte_be16_t udp_dst;
+ rte_be32_t ipv4_src;
+ rte_be32_t ipv4_dst;
+ uint8_t ipv6_src[16];
+ uint8_t ipv6_dst[16];
+ rte_be16_t vlan_tci;
+ uint8_t eth_src[ETHER_ADDR_LEN];
+ uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct vxlan_encap_conf vxlan_encap_conf;
+
+/* NVGRE encap/decap parameters. */
+struct nvgre_encap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+ uint8_t tni[3];
+ rte_be32_t ipv4_src;
+ rte_be32_t ipv4_dst;
+ uint8_t ipv6_src[16];
+ uint8_t ipv6_dst[16];
+ rte_be16_t vlan_tci;
+ uint8_t eth_src[ETHER_ADDR_LEN];
+ uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct nvgre_encap_conf nvgre_encap_conf;
+
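
As a non-authoritative sketch of how these parameters end up being consumed (the rte_flow wiring that reads them is outside this hunk), the outer Ethernet header of a VXLAN encapsulation definition could be filled from the configured addresses like so (helper name hypothetical):

#include <string.h>
#include <rte_flow.h>
#include "testpmd.h"

/* Sketch: copy the configured outer MAC addresses into a flow item. */
static void
example_fill_outer_eth(struct rte_flow_item_eth *eth)
{
	memset(eth, 0, sizeof(*eth));
	memcpy(eth->dst.addr_bytes, vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
	memcpy(eth->src.addr_bytes, vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
}
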
static inline unsigned int
lcore_num(void)
{