summaryrefslogtreecommitdiffstats
path: root/app/test-pmd
diff options
context:
space:
mode:
authorC.J. Collier <cjcollier@linuxfoundation.org>2016-06-14 07:50:17 -0700
committerC.J. Collier <cjcollier@linuxfoundation.org>2016-06-14 12:17:54 -0700
commit97f17497d162afdb82c8704bf097f0fee3724b2e (patch)
tree1c6269614c0c15ffef8451c58ae8f8b30a1bc804 /app/test-pmd
parente04be89c2409570e0055b2cda60bd11395bb93b0 (diff)
Imported Upstream version 16.04
Change-Id: I77eadcd8538a9122e4773cbe55b24033dc451757 Signed-off-by: C.J. Collier <cjcollier@linuxfoundation.org>
Diffstat (limited to 'app/test-pmd')
-rw-r--r--app/test-pmd/Makefile73
-rw-r--r--app/test-pmd/cmdline.c10612
-rw-r--r--app/test-pmd/config.c2481
-rw-r--r--app/test-pmd/csumonly.c873
-rw-r--r--app/test-pmd/flowgen.c248
-rw-r--r--app/test-pmd/icmpecho.c542
-rw-r--r--app/test-pmd/ieee1588fwd.c248
-rw-r--r--app/test-pmd/iofwd.c128
-rw-r--r--app/test-pmd/macfwd-retry.c164
-rw-r--r--app/test-pmd/macfwd.c151
-rw-r--r--app/test-pmd/macswap.c153
-rw-r--r--app/test-pmd/mempool_anon.c201
-rw-r--r--app/test-pmd/mempool_osdep.h54
-rw-r--r--app/test-pmd/parameters.c986
-rw-r--r--app/test-pmd/rxonly.c404
-rw-r--r--app/test-pmd/testpmd.c2097
-rw-r--r--app/test-pmd/testpmd.h603
-rw-r--r--app/test-pmd/txonly.c327
18 files changed, 20345 insertions, 0 deletions
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
new file mode 100644
index 00000000..72426f31
--- /dev/null
+++ b/app/test-pmd/Makefile
@@ -0,0 +1,73 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
# Pull in the DPDK build system's variable definitions (RTE_SDK must point
# at the SDK root).
include $(RTE_SDK)/mk/rte.vars.mk

# Build testpmd only when it is enabled in the DPDK configuration.
ifeq ($(CONFIG_RTE_TEST_PMD),y)

#
# library name
#
APP = testpmd

CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)

#
# all source are stored in SRCS-y
#
SRCS-y := testpmd.c
SRCS-y += parameters.c
# Interactive CLI; compiled only when librte_cmdline is enabled.
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline.c
SRCS-y += config.c
SRCS-y += iofwd.c
SRCS-y += macfwd.c
SRCS-y += macfwd-retry.c
SRCS-y += macswap.c
SRCS-y += flowgen.c
SRCS-y += rxonly.c
SRCS-y += txonly.c
SRCS-y += csumonly.c
SRCS-y += icmpecho.c
# IEEE1588 (PTP) forwarding mode is optional.
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
SRCS-y += mempool_anon.c

# Per-object CFLAGS: _GNU_SOURCE is needed for mempool_anon.o on the Linux
# build environment, and unconditionally for cmdline.o.
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
CFLAGS_mempool_anon.o := -D_GNU_SOURCE
endif
CFLAGS_cmdline.o := -D_GNU_SOURCE

# this application needs libraries first
DEPDIRS-y += lib drivers

include $(RTE_SDK)/mk/rte.app.mk

endif
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
new file mode 100644
index 00000000..c5b94797
--- /dev/null
+++ b/app/test-pmd/cmdline.c
@@ -0,0 +1,10612 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <inttypes.h>
+#ifndef __linux__
+#ifndef __FreeBSD__
+#include <net/socket.h>
+#else
+#include <sys/socket.h>
+#endif
+#endif
+#include <netinet/in.h>
+
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_devargs.h>
+#include <rte_eth_ctrl.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+#ifdef RTE_LIBRTE_PMD_BOND
+#include <rte_eth_bond.h>
+#endif
+
+#include "testpmd.h"
+
/* The interactive command-line instance used by this file. */
static struct cmdline *testpmd_cl;

/* Forward declaration — defined later in this file. */
static void cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue);

#ifdef RTE_NIC_BYPASS
/* NOTE(review): declared here without 'static'; presumably defined elsewhere
 * in testpmd — reports whether the port supports the NIC bypass feature. */
uint8_t bypass_is_supported(portid_t port_id);
#endif
+
/* *** Help command with introduction. *** */
/* Token layout for the bare "help" command (no section argument). */
struct cmd_help_brief_result {
	cmdline_fixed_string_t help;	/* matched literal "help" */
};

/*
 * Handler for "help": print the list of available help sections to the
 * command line 'cl'.  parsed_result and data are unused.
 */
static void cmd_help_brief_parsed(__attribute__((unused)) void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	cmdline_printf(
		cl,
		"\n"
		"Help is available for the following sections:\n\n"
		" help control : Start and stop forwarding.\n"
		" help display : Displaying port, stats and config "
		"information.\n"
		" help config : Configuration information.\n"
		" help ports : Configuring ports.\n"
		" help registers : Reading and setting port registers.\n"
		" help filters : Filters configuration help.\n"
		" help all : All of the above sections.\n\n"
	);

}

/* Token: the literal string "help". */
cmdline_parse_token_string_t cmd_help_brief_help =
	TOKEN_STRING_INITIALIZER(struct cmd_help_brief_result, help, "help");

/* Command instance wiring "help" to cmd_help_brief_parsed(). */
cmdline_parse_inst_t cmd_help_brief = {
	.f = cmd_help_brief_parsed,
	.data = NULL,
	.help_str = "show help",
	.tokens = {
		(void *)&cmd_help_brief_help,
		NULL,
	},
};
+
/* *** Help command with help sections. *** */
/* Token layout for "help <section>". */
struct cmd_help_long_result {
	cmdline_fixed_string_t help;	/* literal "help" */
	cmdline_fixed_string_t section;	/* requested section name, or "all" */
};
+
+static void cmd_help_long_parsed(void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int show_all = 0;
+ struct cmd_help_long_result *res = parsed_result;
+
+ if (!strcmp(res->section, "all"))
+ show_all = 1;
+
+ if (show_all || !strcmp(res->section, "control")) {
+
+ cmdline_printf(
+ cl,
+ "\n"
+ "Control forwarding:\n"
+ "-------------------\n\n"
+
+ "start\n"
+ " Start packet forwarding with current configuration.\n\n"
+
+ "start tx_first\n"
+ " Start packet forwarding with current config"
+ " after sending one burst of packets.\n\n"
+
+ "stop\n"
+ " Stop packet forwarding, and display accumulated"
+ " statistics.\n\n"
+
+ "quit\n"
+ " Quit to prompt.\n\n"
+ );
+ }
+
+ if (show_all || !strcmp(res->section, "display")) {
+
+ cmdline_printf(
+ cl,
+ "\n"
+ "Display:\n"
+ "--------\n\n"
+
+ "show port (info|stats|xstats|fdir|stat_qmap|dcb_tc) (port_id|all)\n"
+ " Display information for port_id, or all.\n\n"
+
+ "show port X rss reta (size) (mask0,mask1,...)\n"
+ " Display the rss redirection table entry indicated"
+ " by masks on port X. size is used to indicate the"
+ " hardware supported reta size\n\n"
+
+ "show port rss-hash ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
+ "ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|"
+ "ipv6-other|l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex [key]\n"
+ " Display the RSS hash functions and RSS hash key"
+ " of port X\n\n"
+
+ "clear port (info|stats|xstats|fdir|stat_qmap) (port_id|all)\n"
+ " Clear information for port_id, or all.\n\n"
+
+ "show (rxq|txq) info (port_id) (queue_id)\n"
+ " Display information for configured RX/TX queue.\n\n"
+
+ "show config (rxtx|cores|fwd|txpkts)\n"
+ " Display the given configuration.\n\n"
+
+ "read rxd (port_id) (queue_id) (rxd_id)\n"
+ " Display an RX descriptor of a port RX queue.\n\n"
+
+ "read txd (port_id) (queue_id) (txd_id)\n"
+ " Display a TX descriptor of a port TX queue.\n\n"
+ );
+ }
+
+ if (show_all || !strcmp(res->section, "config")) {
+ cmdline_printf(
+ cl,
+ "\n"
+ "Configuration:\n"
+ "--------------\n"
+ "Configuration changes only become active when"
+ " forwarding is started/restarted.\n\n"
+
+ "set default\n"
+ " Reset forwarding to the default configuration.\n\n"
+
+ "set verbose (level)\n"
+ " Set the debug verbosity level X.\n\n"
+
+ "set nbport (num)\n"
+ " Set number of ports.\n\n"
+
+ "set nbcore (num)\n"
+ " Set number of cores.\n\n"
+
+ "set coremask (mask)\n"
+ " Set the forwarding cores hexadecimal mask.\n\n"
+
+ "set portmask (mask)\n"
+ " Set the forwarding ports hexadecimal mask.\n\n"
+
+ "set burst (num)\n"
+ " Set number of packets per burst.\n\n"
+
+ "set burst tx delay (microseconds) retry (num)\n"
+ " Set the transmit delay time and number of retries"
+ " in mac_retry forwarding mode.\n\n"
+
+ "set txpkts (x[,y]*)\n"
+ " Set the length of each segment of TXONLY"
+ " and optionally CSUM packets.\n\n"
+
+ "set txsplit (off|on|rand)\n"
+ " Set the split policy for the TX packets."
+ " Right now only applicable for CSUM and TXONLY"
+ " modes\n\n"
+
+ "set corelist (x[,y]*)\n"
+ " Set the list of forwarding cores.\n\n"
+
+ "set portlist (x[,y]*)\n"
+ " Set the list of forwarding ports.\n\n"
+
+ "vlan set strip (on|off) (port_id)\n"
+ " Set the VLAN strip on a port.\n\n"
+
+ "vlan set stripq (on|off) (port_id,queue_id)\n"
+ " Set the VLAN strip for a queue on a port.\n\n"
+
+ "vlan set filter (on|off) (port_id)\n"
+ " Set the VLAN filter on a port.\n\n"
+
+ "vlan set qinq (on|off) (port_id)\n"
+ " Set the VLAN QinQ (extended queue in queue)"
+ " on a port.\n\n"
+
+ "vlan set (inner|outer) tpid (value) (port_id)\n"
+ " Set the VLAN TPID for Packet Filtering on"
+ " a port\n\n"
+
+ "rx_vlan add (vlan_id|all) (port_id)\n"
+ " Add a vlan_id, or all identifiers, to the set"
+ " of VLAN identifiers filtered by port_id.\n\n"
+
+ "rx_vlan rm (vlan_id|all) (port_id)\n"
+ " Remove a vlan_id, or all identifiers, from the set"
+ " of VLAN identifiers filtered by port_id.\n\n"
+
+ "rx_vlan add (vlan_id) port (port_id) vf (vf_mask)\n"
+ " Add a vlan_id, to the set of VLAN identifiers"
+ "filtered for VF(s) from port_id.\n\n"
+
+ "rx_vlan rm (vlan_id) port (port_id) vf (vf_mask)\n"
+ " Remove a vlan_id, to the set of VLAN identifiers"
+ "filtered for VF(s) from port_id.\n\n"
+
+ "tunnel_filter add (port_id) (outer_mac) (inner_mac) (ip_addr) "
+ "(inner_vlan) (vxlan|nvgre|ipingre) (imac-ivlan|imac-ivlan-tenid|"
+ "imac-tenid|imac|omac-imac-tenid|oip|iip) (tenant_id) (queue_id)\n"
+ " add a tunnel filter of a port.\n\n"
+
+ "tunnel_filter rm (port_id) (outer_mac) (inner_mac) (ip_addr) "
+ "(inner_vlan) (vxlan|nvgre|ipingre) (imac-ivlan|imac-ivlan-tenid|"
+ "imac-tenid|imac|omac-imac-tenid|oip|iip) (tenant_id) (queue_id)\n"
+ " remove a tunnel filter of a port.\n\n"
+
+ "rx_vxlan_port add (udp_port) (port_id)\n"
+ " Add an UDP port for VXLAN packet filter on a port\n\n"
+
+ "rx_vxlan_port rm (udp_port) (port_id)\n"
+ " Remove an UDP port for VXLAN packet filter on a port\n\n"
+
+ "tx_vlan set (port_id) vlan_id[, vlan_id_outer]\n"
+ " Set hardware insertion of VLAN IDs (single or double VLAN "
+ "depends on the number of VLAN IDs) in packets sent on a port.\n\n"
+
+ "tx_vlan set pvid port_id vlan_id (on|off)\n"
+ " Set port based TX VLAN insertion.\n\n"
+
+ "tx_vlan reset (port_id)\n"
+ " Disable hardware insertion of a VLAN header in"
+ " packets sent on a port.\n\n"
+
+ "csum set (ip|udp|tcp|sctp|outer-ip) (hw|sw) (port_id)\n"
+ " Select hardware or software calculation of the"
+ " checksum when transmitting a packet using the"
+ " csum forward engine.\n"
+ " ip|udp|tcp|sctp always concern the inner layer.\n"
+ " outer-ip concerns the outer IP layer in"
+ " case the packet is recognized as a tunnel packet by"
+ " the forward engine (vxlan, gre and ipip are supported)\n"
+ " Please check the NIC datasheet for HW limits.\n\n"
+
+ "csum parse-tunnel (on|off) (tx_port_id)\n"
+ " If disabled, treat tunnel packets as non-tunneled"
+ " packets (treat inner headers as payload). The port\n"
+ " argument is the port used for TX in csum forward"
+ " engine.\n\n"
+
+ "csum show (port_id)\n"
+ " Display tx checksum offload configuration\n\n"
+
+ "tso set (segsize) (portid)\n"
+ " Enable TCP Segmentation Offload in csum forward"
+ " engine.\n"
+ " Please check the NIC datasheet for HW limits.\n\n"
+
+ "tso show (portid)"
+ " Display the status of TCP Segmentation Offload.\n\n"
+
+ "set fwd (%s)\n"
+ " Set packet forwarding mode.\n\n"
+
+ "mac_addr add (port_id) (XX:XX:XX:XX:XX:XX)\n"
+ " Add a MAC address on port_id.\n\n"
+
+ "mac_addr remove (port_id) (XX:XX:XX:XX:XX:XX)\n"
+ " Remove a MAC address from port_id.\n\n"
+
+ "mac_addr add port (port_id) vf (vf_id) (mac_address)\n"
+ " Add a MAC address for a VF on the port.\n\n"
+
+ "set port (port_id) uta (mac_address|all) (on|off)\n"
+ " Add/Remove a or all unicast hash filter(s)"
+ "from port X.\n\n"
+
+ "set promisc (port_id|all) (on|off)\n"
+ " Set the promiscuous mode on port_id, or all.\n\n"
+
+ "set allmulti (port_id|all) (on|off)\n"
+ " Set the allmulti mode on port_id, or all.\n\n"
+
+ "set flow_ctrl rx (on|off) tx (on|off) (high_water)"
+ " (low_water) (pause_time) (send_xon) mac_ctrl_frame_fwd"
+ " (on|off) autoneg (on|off) (port_id)\n"
+ "set flow_ctrl rx (on|off) (portid)\n"
+ "set flow_ctrl tx (on|off) (portid)\n"
+ "set flow_ctrl high_water (high_water) (portid)\n"
+ "set flow_ctrl low_water (low_water) (portid)\n"
+ "set flow_ctrl pause_time (pause_time) (portid)\n"
+ "set flow_ctrl send_xon (send_xon) (portid)\n"
+ "set flow_ctrl mac_ctrl_frame_fwd (on|off) (portid)\n"
+ "set flow_ctrl autoneg (on|off) (port_id)\n"
+ " Set the link flow control parameter on a port.\n\n"
+
+ "set pfc_ctrl rx (on|off) tx (on|off) (high_water)"
+ " (low_water) (pause_time) (priority) (port_id)\n"
+ " Set the priority flow control parameter on a"
+ " port.\n\n"
+
+ "set stat_qmap (tx|rx) (port_id) (queue_id) (qmapping)\n"
+ " Set statistics mapping (qmapping 0..15) for RX/TX"
+ " queue on port.\n"
+ " e.g., 'set stat_qmap rx 0 2 5' sets rx queue 2"
+ " on port 0 to mapping 5.\n\n"
+
+ "set port (port_id) vf (vf_id) rx|tx on|off\n"
+ " Enable/Disable a VF receive/tranmit from a port\n\n"
+
+ "set port (port_id) vf (vf_id) (mac_addr)"
+ " (exact-mac#exact-mac-vlan#hashmac|hashmac-vlan) on|off\n"
+ " Add/Remove unicast or multicast MAC addr filter"
+ " for a VF.\n\n"
+
+ "set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM"
+ "|MPE) (on|off)\n"
+ " AUPE:accepts untagged VLAN;"
+ "ROPE:accept unicast hash\n\n"
+ " BAM:accepts broadcast packets;"
+ "MPE:accepts all multicast packets\n\n"
+ " Enable/Disable a VF receive mode of a port\n\n"
+
+ "set port (port_id) queue (queue_id) rate (rate_num)\n"
+ " Set rate limit for a queue of a port\n\n"
+
+ "set port (port_id) vf (vf_id) rate (rate_num) "
+ "queue_mask (queue_mask_value)\n"
+ " Set rate limit for queues in VF of a port\n\n"
+
+ "set port (port_id) mirror-rule (rule_id)"
+ " (pool-mirror-up|pool-mirror-down|vlan-mirror)"
+ " (poolmask|vlanid[,vlanid]*) dst-pool (pool_id) (on|off)\n"
+ " Set pool or vlan type mirror rule on a port.\n"
+ " e.g., 'set port 0 mirror-rule 0 vlan-mirror 0,1"
+ " dst-pool 0 on' enable mirror traffic with vlan 0,1"
+ " to pool 0.\n\n"
+
+ "set port (port_id) mirror-rule (rule_id)"
+ " (uplink-mirror|downlink-mirror) dst-pool"
+ " (pool_id) (on|off)\n"
+ " Set uplink or downlink type mirror rule on a port.\n"
+ " e.g., 'set port 0 mirror-rule 0 uplink-mirror dst-pool"
+ " 0 on' enable mirror income traffic to pool 0.\n\n"
+
+ "reset port (port_id) mirror-rule (rule_id)\n"
+ " Reset a mirror rule.\n\n"
+
+ "set flush_rx (on|off)\n"
+ " Flush (default) or don't flush RX streams before"
+ " forwarding. Mainly used with PCAP drivers.\n\n"
+
+ #ifdef RTE_NIC_BYPASS
+ "set bypass mode (normal|bypass|isolate) (port_id)\n"
+ " Set the bypass mode for the lowest port on bypass enabled"
+ " NIC.\n\n"
+
+ "set bypass event (timeout|os_on|os_off|power_on|power_off) "
+ "mode (normal|bypass|isolate) (port_id)\n"
+ " Set the event required to initiate specified bypass mode for"
+ " the lowest port on a bypass enabled NIC where:\n"
+ " timeout = enable bypass after watchdog timeout.\n"
+ " os_on = enable bypass when OS/board is powered on.\n"
+ " os_off = enable bypass when OS/board is powered off.\n"
+ " power_on = enable bypass when power supply is turned on.\n"
+ " power_off = enable bypass when power supply is turned off."
+ "\n\n"
+
+ "set bypass timeout (0|1.5|2|3|4|8|16|32)\n"
+ " Set the bypass watchdog timeout to 'n' seconds"
+ " where 0 = instant.\n\n"
+
+ "show bypass config (port_id)\n"
+ " Show the bypass configuration for a bypass enabled NIC"
+ " using the lowest port on the NIC.\n\n"
+#endif
+#ifdef RTE_LIBRTE_PMD_BOND
+ "create bonded device (mode) (socket)\n"
+ " Create a new bonded device with specific bonding mode and socket.\n\n"
+
+ "add bonding slave (slave_id) (port_id)\n"
+ " Add a slave device to a bonded device.\n\n"
+
+ "remove bonding slave (slave_id) (port_id)\n"
+ " Remove a slave device from a bonded device.\n\n"
+
+ "set bonding mode (value) (port_id)\n"
+ " Set the bonding mode on a bonded device.\n\n"
+
+ "set bonding primary (slave_id) (port_id)\n"
+ " Set the primary slave for a bonded device.\n\n"
+
+ "show bonding config (port_id)\n"
+ " Show the bonding config for port_id.\n\n"
+
+ "set bonding mac_addr (port_id) (address)\n"
+ " Set the MAC address of a bonded device.\n\n"
+
+ "set bonding xmit_balance_policy (port_id) (l2|l23|l34)\n"
+ " Set the transmit balance policy for bonded device running in balance mode.\n\n"
+
+ "set bonding mon_period (port_id) (value)\n"
+ " Set the bonding link status monitoring polling period in ms.\n\n"
+#endif
+ "set link-up port (port_id)\n"
+ " Set link up for a port.\n\n"
+
+ "set link-down port (port_id)\n"
+ " Set link down for a port.\n\n"
+
+ "E-tag set insertion on port-tag-id (value)"
+ " port (port_id) vf (vf_id)\n"
+ " Enable E-tag insertion for a VF on a port\n\n"
+
+ "E-tag set insertion off port (port_id) vf (vf_id)\n"
+ " Disable E-tag insertion for a VF on a port\n\n"
+
+ "E-tag set stripping (on|off) port (port_id)\n"
+ " Enable/disable E-tag stripping on a port\n\n"
+
+ "E-tag set forwarding (on|off) port (port_id)\n"
+ " Enable/disable E-tag based forwarding"
+ " on a port\n\n"
+
+ "E-tag set filter add e-tag-id (value) dst-pool"
+ " (pool_id) port (port_id)\n"
+ " Add an E-tag forwarding filter on a port\n\n"
+
+ "E-tag set filter del e-tag-id (value) port (port_id)\n"
+ " Delete an E-tag forwarding filter on a port\n\n"
+
+ , list_pkt_forwarding_modes()
+ );
+ }
+
+ if (show_all || !strcmp(res->section, "ports")) {
+
+ cmdline_printf(
+ cl,
+ "\n"
+ "Port Operations:\n"
+ "----------------\n\n"
+
+ "port start (port_id|all)\n"
+ " Start all ports or port_id.\n\n"
+
+ "port stop (port_id|all)\n"
+ " Stop all ports or port_id.\n\n"
+
+ "port close (port_id|all)\n"
+ " Close all ports or port_id.\n\n"
+
+ "port attach (ident)\n"
+ " Attach physical or virtual dev by pci address or virtual device name\n\n"
+
+ "port detach (port_id)\n"
+ " Detach physical or virtual dev by port_id\n\n"
+
+ "port config (port_id|all)"
+ " speed (10|100|1000|10000|40000|100000|auto)"
+ " duplex (half|full|auto)\n"
+ " Set speed and duplex for all ports or port_id\n\n"
+
+ "port config all (rxq|txq|rxd|txd) (value)\n"
+ " Set number for rxq/txq/rxd/txd.\n\n"
+
+ "port config all max-pkt-len (value)\n"
+ " Set the max packet length.\n\n"
+
+ "port config all (crc-strip|rx-cksum|hw-vlan|hw-vlan-filter|"
+ "hw-vlan-strip|hw-vlan-extend|drop-en)"
+ " (on|off)\n"
+ " Set crc-strip/rx-checksum/hardware-vlan/drop_en"
+ " for ports.\n\n"
+
+ "port config all rss (all|ip|tcp|udp|sctp|ether|none)\n"
+ " Set the RSS mode.\n\n"
+
+ "port config port-id rss reta (hash,queue)[,(hash,queue)]\n"
+ " Set the RSS redirection table.\n\n"
+
+ "port config (port_id) dcb vt (on|off) (traffic_class)"
+ " pfc (on|off)\n"
+ " Set the DCB mode.\n\n"
+
+ "port config all burst (value)\n"
+ " Set the number of packets per burst.\n\n"
+
+ "port config all (txpt|txht|txwt|rxpt|rxht|rxwt)"
+ " (value)\n"
+ " Set the ring prefetch/host/writeback threshold"
+ " for tx/rx queue.\n\n"
+
+ "port config all (txfreet|txrst|rxfreet) (value)\n"
+ " Set free threshold for rx/tx, or set"
+ " tx rs bit threshold.\n\n"
+ "port config mtu X value\n"
+ " Set the MTU of port X to a given value\n\n"
+
+ "port (port_id) (rxq|txq) (queue_id) (start|stop)\n"
+ " Start/stop a rx/tx queue of port X. Only take effect"
+ " when port X is started\n\n"
+
+ "port config (port_id|all) l2-tunnel E-tag ether-type"
+ " (value)\n"
+ " Set the value of E-tag ether-type.\n\n"
+
+ "port config (port_id|all) l2-tunnel E-tag"
+ " (enable|disable)\n"
+ " Enable/disable the E-tag support.\n\n"
+ );
+ }
+
+ if (show_all || !strcmp(res->section, "registers")) {
+
+ cmdline_printf(
+ cl,
+ "\n"
+ "Registers:\n"
+ "----------\n\n"
+
+ "read reg (port_id) (address)\n"
+ " Display value of a port register.\n\n"
+
+ "read regfield (port_id) (address) (bit_x) (bit_y)\n"
+ " Display a port register bit field.\n\n"
+
+ "read regbit (port_id) (address) (bit_x)\n"
+ " Display a single port register bit.\n\n"
+
+ "write reg (port_id) (address) (value)\n"
+ " Set value of a port register.\n\n"
+
+ "write regfield (port_id) (address) (bit_x) (bit_y)"
+ " (value)\n"
+ " Set bit field of a port register.\n\n"
+
+ "write regbit (port_id) (address) (bit_x) (value)\n"
+ " Set single bit value of a port register.\n\n"
+ );
+ }
+ if (show_all || !strcmp(res->section, "filters")) {
+
+ cmdline_printf(
+ cl,
+ "\n"
+ "filters:\n"
+ "--------\n\n"
+
+ "ethertype_filter (port_id) (add|del)"
+ " (mac_addr|mac_ignr) (mac_address) ethertype"
+ " (ether_type) (drop|fwd) queue (queue_id)\n"
+ " Add/Del an ethertype filter.\n\n"
+
+ "2tuple_filter (port_id) (add|del)"
+ " dst_port (dst_port_value) protocol (protocol_value)"
+ " mask (mask_value) tcp_flags (tcp_flags_value)"
+ " priority (prio_value) queue (queue_id)\n"
+ " Add/Del a 2tuple filter.\n\n"
+
+ "5tuple_filter (port_id) (add|del)"
+ " dst_ip (dst_address) src_ip (src_address)"
+ " dst_port (dst_port_value) src_port (src_port_value)"
+ " protocol (protocol_value)"
+ " mask (mask_value) tcp_flags (tcp_flags_value)"
+ " priority (prio_value) queue (queue_id)\n"
+ " Add/Del a 5tuple filter.\n\n"
+
+ "syn_filter (port_id) (add|del) priority (high|low) queue (queue_id)"
+ " Add/Del syn filter.\n\n"
+
+ "flex_filter (port_id) (add|del) len (len_value)"
+ " bytes (bytes_value) mask (mask_value)"
+ " priority (prio_value) queue (queue_id)\n"
+ " Add/Del a flex filter.\n\n"
+
+ "flow_director_filter (port_id) mode IP (add|del|update)"
+ " flow (ipv4-other|ipv4-frag|ipv6-other|ipv6-frag)"
+ " src (src_ip_address) dst (dst_ip_address)"
+ " tos (tos_value) proto (proto_value) ttl (ttl_value)"
+ " vlan (vlan_value) flexbytes (flexbytes_value)"
+ " (drop|fwd) pf|vf(vf_id) queue (queue_id)"
+ " fd_id (fd_id_value)\n"
+ " Add/Del an IP type flow director filter.\n\n"
+
+ "flow_director_filter (port_id) mode IP (add|del|update)"
+ " flow (ipv4-tcp|ipv4-udp|ipv6-tcp|ipv6-udp)"
+ " src (src_ip_address) (src_port)"
+ " dst (dst_ip_address) (dst_port)"
+ " tos (tos_value) ttl (ttl_value)"
+ " vlan (vlan_value) flexbytes (flexbytes_value)"
+ " (drop|fwd) pf|vf(vf_id) queue (queue_id)"
+ " fd_id (fd_id_value)\n"
+ " Add/Del an UDP/TCP type flow director filter.\n\n"
+
+ "flow_director_filter (port_id) mode IP (add|del|update)"
+ " flow (ipv4-sctp|ipv6-sctp)"
+ " src (src_ip_address) (src_port)"
+ " dst (dst_ip_address) (dst_port)"
+ " tag (verification_tag) "
+ " tos (tos_value) ttl (ttl_value)"
+ " vlan (vlan_value)"
+ " flexbytes (flexbytes_value) (drop|fwd)"
+ " pf|vf(vf_id) queue (queue_id) fd_id (fd_id_value)\n"
+ " Add/Del a SCTP type flow director filter.\n\n"
+
+ "flow_director_filter (port_id) mode IP (add|del|update)"
+ " flow l2_payload ether (ethertype)"
+ " flexbytes (flexbytes_value) (drop|fwd)"
+ " pf|vf(vf_id) queue (queue_id) fd_id (fd_id_value)\n"
+ " Add/Del a l2 payload type flow director filter.\n\n"
+
+ "flow_director_filter (port_id) mode MAC-VLAN (add|del|update)"
+ " mac (mac_address) vlan (vlan_value)"
+ " flexbytes (flexbytes_value) (drop|fwd)"
+ " queue (queue_id) fd_id (fd_id_value)\n"
+ " Add/Del a MAC-VLAN flow director filter.\n\n"
+
+ "flow_director_filter (port_id) mode Tunnel (add|del|update)"
+ " mac (mac_address) vlan (vlan_value)"
+ " tunnel (NVGRE|VxLAN) tunnel-id (tunnel_id_value)"
+ " flexbytes (flexbytes_value) (drop|fwd)"
+ " queue (queue_id) fd_id (fd_id_value)\n"
+ " Add/Del a Tunnel flow director filter.\n\n"
+
+ "flush_flow_director (port_id)\n"
+ " Flush all flow director entries of a device.\n\n"
+
+ "flow_director_mask (port_id) mode IP vlan (vlan_value)"
+ " src_mask (ipv4_src) (ipv6_src) (src_port)"
+ " dst_mask (ipv4_dst) (ipv6_dst) (dst_port)\n"
+ " Set flow director IP mask.\n\n"
+
+ "flow_director_mask (port_id) mode MAC-VLAN"
+ " vlan (vlan_value) mac (mac_value)\n"
+ " Set flow director MAC-VLAN mask.\n\n"
+
+ "flow_director_mask (port_id) mode Tunnel"
+ " vlan (vlan_value) mac (mac_value)"
+ " tunnel-type (tunnel_type_value)"
+ " tunnel-id (tunnel_id_value)\n"
+ " Set flow director Tunnel mask.\n\n"
+
+ "flow_director_flex_mask (port_id)"
+ " flow (none|ipv4-other|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|"
+ "ipv6-other|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|l2_payload|all)"
+ " (mask)\n"
+ " Configure mask of flex payload.\n\n"
+
+ "flow_director_flex_payload (port_id)"
+ " (raw|l2|l3|l4) (config)\n"
+ " Configure flex payload selection.\n\n"
+
+ "get_sym_hash_ena_per_port (port_id)\n"
+ " get symmetric hash enable configuration per port.\n\n"
+
+ "set_sym_hash_ena_per_port (port_id) (enable|disable)\n"
+ " set symmetric hash enable configuration per port"
+ " to enable or disable.\n\n"
+
+ "get_hash_global_config (port_id)\n"
+ " Get the global configurations of hash filters.\n\n"
+
+ "set_hash_global_config (port_id) (toeplitz|simple_xor|default)"
+ " (ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"
+ "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload)"
+ " (enable|disable)\n"
+ " Set the global configurations of hash filters.\n\n"
+
+ "set_hash_input_set (port_id) (ipv4|ipv4-frag|"
+ "ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"
+ "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
+ "l2_payload) (ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|"
+ "dst-ipv6|ipv4-tos|ipv4-proto|ipv6-tc|"
+ "ipv6-next-header|udp-src-port|udp-dst-port|"
+ "tcp-src-port|tcp-dst-port|sctp-src-port|"
+ "sctp-dst-port|sctp-veri-tag|udp-key|gre-key|fld-1st|"
+ "fld-2nd|fld-3rd|fld-4th|fld-5th|fld-6th|fld-7th|"
+ "fld-8th|none) (select|add)\n"
+ " Set the input set for hash.\n\n"
+
+ "set_fdir_input_set (port_id) "
+ "(ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
+ "l2_payload) (ivlan|ethertype|src-ipv4|dst-ipv4|src-ipv6|"
+ "dst-ipv6|ipv4-tos|ipv4-proto|ipv4-ttl|ipv6-tc|"
+ "ipv6-next-header|ipv6-hop-limits|udp-src-port|"
+ "udp-dst-port|tcp-src-port|tcp-dst-port|"
+ "sctp-src-port|sctp-dst-port|sctp-veri-tag|none)"
+ " (select|add)\n"
+ " Set the input set for FDir.\n\n"
+ );
+ }
+}
+
/* Token: the literal string "help". */
cmdline_parse_token_string_t cmd_help_long_help =
	TOKEN_STRING_INITIALIZER(struct cmd_help_long_result, help, "help");

/* Token: the section name; '#' separates the accepted alternatives. */
cmdline_parse_token_string_t cmd_help_long_section =
	TOKEN_STRING_INITIALIZER(struct cmd_help_long_result, section,
			"all#control#display#config#"
			"ports#registers#filters");

/* Command instance wiring "help <section>" to cmd_help_long_parsed(). */
cmdline_parse_inst_t cmd_help_long = {
	.f = cmd_help_long_parsed,
	.data = NULL,
	.help_str = "show help",
	.tokens = {
		(void *)&cmd_help_long_help,
		(void *)&cmd_help_long_section,
		NULL,
	},
};
+
+
/* *** start/stop/close all ports *** */
/* Token layout for "port (start|stop|close) all". */
struct cmd_operate_port_result {
	cmdline_fixed_string_t keyword;	/* literal "port" */
	cmdline_fixed_string_t name;	/* operation: start, stop or close */
	cmdline_fixed_string_t value;	/* literal "all" */
};
+
+static void cmd_operate_port_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_operate_port_result *res = parsed_result;
+
+ if (!strcmp(res->name, "start"))
+ start_port(RTE_PORT_ALL);
+ else if (!strcmp(res->name, "stop"))
+ stop_port(RTE_PORT_ALL);
+ else if (!strcmp(res->name, "close"))
+ close_port(RTE_PORT_ALL);
+ else
+ printf("Unknown parameter\n");
+}
+
/* Token: the literal "port" keyword. */
cmdline_parse_token_string_t cmd_operate_port_all_cmd =
	TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, keyword,
			"port");
/* Token: the operation to apply (start, stop or close). */
cmdline_parse_token_string_t cmd_operate_port_all_port =
	TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, name,
			"start#stop#close");
/* Token: the literal "all" (operate on every port). */
cmdline_parse_token_string_t cmd_operate_port_all_all =
	TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, value, "all");

/* Command instance: "port start|stop|close all". */
cmdline_parse_inst_t cmd_operate_port = {
	.f = cmd_operate_port_parsed,
	.data = NULL,
	.help_str = "port start|stop|close all: start/stop/close all ports",
	.tokens = {
		(void *)&cmd_operate_port_all_cmd,
		(void *)&cmd_operate_port_all_port,
		(void *)&cmd_operate_port_all_all,
		NULL,
	},
};
+
/* *** start/stop/close specific port *** */
/* Token layout for "port (start|stop|close) <port_id>". */
struct cmd_operate_specific_port_result {
	cmdline_fixed_string_t keyword;	/* literal "port" */
	cmdline_fixed_string_t name;	/* operation: start, stop or close */
	uint8_t value;			/* target port identifier */
};
+
+static void cmd_operate_specific_port_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_operate_specific_port_result *res = parsed_result;
+
+ if (!strcmp(res->name, "start"))
+ start_port(res->value);
+ else if (!strcmp(res->name, "stop"))
+ stop_port(res->value);
+ else if (!strcmp(res->name, "close"))
+ close_port(res->value);
+ else
+ printf("Unknown parameter\n");
+}
+
+cmdline_parse_token_string_t cmd_operate_specific_port_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_operate_specific_port_result,
+ keyword, "port");
+cmdline_parse_token_string_t cmd_operate_specific_port_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_operate_specific_port_result,
+ name, "start#stop#close");
+cmdline_parse_token_num_t cmd_operate_specific_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_operate_specific_port_result,
+ value, UINT8);
+
+cmdline_parse_inst_t cmd_operate_specific_port = {
+ .f = cmd_operate_specific_port_parsed,
+ .data = NULL,
+ .help_str = "port start|stop|close X: start/stop/close port X",
+ .tokens = {
+ (void *)&cmd_operate_specific_port_cmd,
+ (void *)&cmd_operate_specific_port_port,
+ (void *)&cmd_operate_specific_port_id,
+ NULL,
+ },
+};
+
+/* *** attach a specified port *** */
+/* Parsed result of "port attach <identifier>". */
+struct cmd_operate_attach_port_result {
+	cmdline_fixed_string_t port;		/* literal "port" */
+	cmdline_fixed_string_t keyword;		/* literal "attach" */
+	cmdline_fixed_string_t identifier;	/* PCI address or vdev name */
+};
+
+/* Hot-plug a new port identified by PCI address or virtual device name. */
+static void cmd_operate_attach_port_parsed(void *parsed_result,
+				__attribute__((unused)) struct cmdline *cl,
+				__attribute__((unused)) void *data)
+{
+	struct cmd_operate_attach_port_result *res = parsed_result;
+
+	if (!strcmp(res->keyword, "attach"))
+		attach_port(res->identifier);
+	else
+		printf("Unknown parameter\n");
+}
+
+cmdline_parse_token_string_t cmd_operate_attach_port_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_operate_attach_port_result,
+			port, "port");
+cmdline_parse_token_string_t cmd_operate_attach_port_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_operate_attach_port_result,
+			keyword, "attach");
+/* NULL match string: accept any identifier token. */
+cmdline_parse_token_string_t cmd_operate_attach_port_identifier =
+	TOKEN_STRING_INITIALIZER(struct cmd_operate_attach_port_result,
+			identifier, NULL);
+
+cmdline_parse_inst_t cmd_operate_attach_port = {
+	.f = cmd_operate_attach_port_parsed,
+	.data = NULL,
+	.help_str = "port attach identifier, "
+		"identifier: pci address or virtual dev name",
+	.tokens = {
+		(void *)&cmd_operate_attach_port_port,
+		(void *)&cmd_operate_attach_port_keyword,
+		(void *)&cmd_operate_attach_port_identifier,
+		NULL,
+	},
+};
+
+/* *** detach a specified port *** */
+/* Parsed result of "port detach <port_id>". */
+struct cmd_operate_detach_port_result {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "detach" */
+	uint8_t port_id;		/* port to hot-unplug */
+};
+
+/* Hot-unplug the given port. */
+static void cmd_operate_detach_port_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_operate_detach_port_result *res = parsed_result;
+
+	if (!strcmp(res->keyword, "detach"))
+		detach_port(res->port_id);
+	else
+		printf("Unknown parameter\n");
+}
+
+cmdline_parse_token_string_t cmd_operate_detach_port_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_operate_detach_port_result,
+			port, "port");
+cmdline_parse_token_string_t cmd_operate_detach_port_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_operate_detach_port_result,
+			keyword, "detach");
+cmdline_parse_token_num_t cmd_operate_detach_port_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_operate_detach_port_result,
+			port_id, UINT8);
+
+cmdline_parse_inst_t cmd_operate_detach_port = {
+	.f = cmd_operate_detach_port_parsed,
+	.data = NULL,
+	.help_str = "port detach port_id",
+	.tokens = {
+		(void *)&cmd_operate_detach_port_port,
+		(void *)&cmd_operate_detach_port_keyword,
+		(void *)&cmd_operate_detach_port_port_id,
+		NULL,
+	},
+};
+
+/* *** configure speed for all ports *** */
+/* Parsed result of "port config all speed ... duplex ...". */
+struct cmd_config_speed_all {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "config" */
+	cmdline_fixed_string_t all;	/* literal "all" */
+	cmdline_fixed_string_t item1;	/* literal "speed" */
+	cmdline_fixed_string_t item2;	/* literal "duplex" */
+	cmdline_fixed_string_t value1;	/* speed value string */
+	cmdline_fixed_string_t value2;	/* duplex value string */
+};
+
+/*
+ * Translate (speed string, duplex string) into an ETH_LINK_SPEED_* value
+ * written to *speed.  Half duplex is only valid for 10/100 Mb/s; "auto"
+ * duplex is treated as full duplex.  Returns 0 on success, -1 on any
+ * unknown or inconsistent combination (an error is printed).
+ */
+static int
+parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
+{
+
+	int duplex;
+
+	if (!strcmp(duplexstr, "half")) {
+		duplex = ETH_LINK_HALF_DUPLEX;
+	} else if (!strcmp(duplexstr, "full")) {
+		duplex = ETH_LINK_FULL_DUPLEX;
+	} else if (!strcmp(duplexstr, "auto")) {
+		duplex = ETH_LINK_FULL_DUPLEX;
+	} else {
+		printf("Unknown duplex parameter\n");
+		return -1;
+	}
+
+	if (!strcmp(speedstr, "10")) {
+		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
+				ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
+	} else if (!strcmp(speedstr, "100")) {
+		*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
+				ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
+	} else {
+		/* >= 1 Gb/s speeds exist in full duplex only. */
+		if (duplex != ETH_LINK_FULL_DUPLEX) {
+			printf("Invalid speed/duplex parameters\n");
+			return -1;
+		}
+		if (!strcmp(speedstr, "1000")) {
+			*speed = ETH_LINK_SPEED_1G;
+		} else if (!strcmp(speedstr, "10000")) {
+			*speed = ETH_LINK_SPEED_10G;
+		} else if (!strcmp(speedstr, "40000")) {
+			*speed = ETH_LINK_SPEED_40G;
+		} else if (!strcmp(speedstr, "100000")) {
+			*speed = ETH_LINK_SPEED_100G;
+		} else if (!strcmp(speedstr, "auto")) {
+			*speed = ETH_LINK_SPEED_AUTONEG;
+		} else {
+			printf("Unknown speed parameter\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * "port config all speed ... duplex ...": store the parsed link speed in
+ * every port's device configuration and schedule a reconfiguration.
+ * Requires all ports to be stopped.
+ */
+static void
+cmd_config_speed_all_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_config_speed_all *res = parsed_result;
+	uint32_t link_speed;
+	portid_t pid;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+
+	if (parse_and_check_speed_duplex(res->value1, res->value2,
+			&link_speed) < 0)
+		return;
+
+	FOREACH_PORT(pid, ports) {
+		ports[pid].dev_conf.link_speeds = link_speed;
+	}
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_speed_all_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, port, "port");
+cmdline_parse_token_string_t cmd_config_speed_all_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, keyword,
+							"config");
+cmdline_parse_token_string_t cmd_config_speed_all_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, all, "all");
+cmdline_parse_token_string_t cmd_config_speed_all_item1 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, item1, "speed");
+cmdline_parse_token_string_t cmd_config_speed_all_value1 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, value1,
+				"10#100#1000#10000#40000#100000#auto");
+cmdline_parse_token_string_t cmd_config_speed_all_item2 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, item2, "duplex");
+cmdline_parse_token_string_t cmd_config_speed_all_value2 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, value2,
+						"half#full#auto");
+
+cmdline_parse_inst_t cmd_config_speed_all = {
+	.f = cmd_config_speed_all_parsed,
+	.data = NULL,
+	.help_str = "port config all speed 10|100|1000|10000|40000|100000|auto duplex "
+							"half|full|auto",
+	.tokens = {
+		(void *)&cmd_config_speed_all_port,
+		(void *)&cmd_config_speed_all_keyword,
+		(void *)&cmd_config_speed_all_all,
+		(void *)&cmd_config_speed_all_item1,
+		(void *)&cmd_config_speed_all_value1,
+		(void *)&cmd_config_speed_all_item2,
+		(void *)&cmd_config_speed_all_value2,
+		NULL,
+	},
+};
+
+/* *** configure speed for specific port *** */
+/* Parsed result of "port config <port_id> speed ... duplex ...". */
+struct cmd_config_speed_specific {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "config" */
+	uint8_t id;			/* target port id */
+	cmdline_fixed_string_t item1;	/* literal "speed" */
+	cmdline_fixed_string_t item2;	/* literal "duplex" */
+	cmdline_fixed_string_t value1;	/* speed value string */
+	cmdline_fixed_string_t value2;	/* duplex value string */
+};
+
+/*
+ * Configure the link speed of one port.  Requires all ports to be
+ * stopped, then schedules a reconfiguration of all ports.
+ */
+static void
+cmd_config_speed_specific_parsed(void *parsed_result,
+				__attribute__((unused)) struct cmdline *cl,
+				__attribute__((unused)) void *data)
+{
+	struct cmd_config_speed_specific *res = parsed_result;
+	uint32_t link_speed;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+
+	if (port_id_is_invalid(res->id, ENABLED_WARN))
+		return;
+
+	if (parse_and_check_speed_duplex(res->value1, res->value2,
+			&link_speed) < 0)
+		return;
+
+	ports[res->id].dev_conf.link_speeds = link_speed;
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+
+cmdline_parse_token_string_t cmd_config_speed_specific_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, port,
+								"port");
+cmdline_parse_token_string_t cmd_config_speed_specific_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, keyword,
+								"config");
+cmdline_parse_token_num_t cmd_config_speed_specific_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT8);
+cmdline_parse_token_string_t cmd_config_speed_specific_item1 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, item1,
+								"speed");
+cmdline_parse_token_string_t cmd_config_speed_specific_value1 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, value1,
+				"10#100#1000#10000#40000#100000#auto");
+cmdline_parse_token_string_t cmd_config_speed_specific_item2 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, item2,
+								"duplex");
+cmdline_parse_token_string_t cmd_config_speed_specific_value2 =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, value2,
+							"half#full#auto");
+
+cmdline_parse_inst_t cmd_config_speed_specific = {
+	.f = cmd_config_speed_specific_parsed,
+	.data = NULL,
+	.help_str = "port config X speed 10|100|1000|10000|40000|100000|auto duplex "
+							"half|full|auto",
+	.tokens = {
+		(void *)&cmd_config_speed_specific_port,
+		(void *)&cmd_config_speed_specific_keyword,
+		(void *)&cmd_config_speed_specific_id,
+		(void *)&cmd_config_speed_specific_item1,
+		(void *)&cmd_config_speed_specific_value1,
+		(void *)&cmd_config_speed_specific_item2,
+		(void *)&cmd_config_speed_specific_value2,
+		NULL,
+	},
+};
+
+/* *** configure txq/rxq, txd/rxd *** */
+/* Parsed result of "port config all rxq|txq|rxd|txd <value>". */
+struct cmd_config_rx_tx {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "config" */
+	cmdline_fixed_string_t all;	/* literal "all" */
+	cmdline_fixed_string_t name;	/* which knob: rxq, txq, rxd or txd */
+	uint16_t value;			/* new queue or descriptor count */
+};
+
+/*
+ * "port config all rxq|txq|rxd|txd <value>": update the global number of
+ * rx/tx queues (nb_rxq/nb_txq) or ring descriptors (nb_rxd/nb_txd), then
+ * rebuild the port configuration.  All ports must be stopped first.
+ * At least one of rxq/txq must remain non-zero.
+ */
+static void
+cmd_config_rx_tx_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_config_rx_tx *res = parsed_result;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+	if (!strcmp(res->name, "rxq")) {
+		if (!res->value && !nb_txq) {
+			printf("Warning: Either rx or tx queues should be non zero\n");
+			return;
+		}
+		nb_rxq = res->value;
+	} else if (!strcmp(res->name, "txq")) {
+		if (!res->value && !nb_rxq) {
+			printf("Warning: Either rx or tx queues should be non zero\n");
+			return;
+		}
+		nb_txq = res->value;
+	} else if (!strcmp(res->name, "rxd")) {
+		/* res->value is unsigned, so "<= 0" was a tautological
+		 * lower bound; check for zero explicitly. */
+		if (res->value == 0 || res->value > RTE_TEST_RX_DESC_MAX) {
+			printf("rxd %d invalid - must be > 0 && <= %d\n",
+					res->value, RTE_TEST_RX_DESC_MAX);
+			return;
+		}
+		nb_rxd = res->value;
+	} else if (!strcmp(res->name, "txd")) {
+		if (res->value == 0 || res->value > RTE_TEST_TX_DESC_MAX) {
+			printf("txd %d invalid - must be > 0 && <= %d\n",
+					res->value, RTE_TEST_TX_DESC_MAX);
+			return;
+		}
+		nb_txd = res->value;
+	} else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	init_port_config();
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_rx_tx_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, port, "port");
+cmdline_parse_token_string_t cmd_config_rx_tx_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, keyword, "config");
+cmdline_parse_token_string_t cmd_config_rx_tx_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, all, "all");
+cmdline_parse_token_string_t cmd_config_rx_tx_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, name,
+						"rxq#txq#rxd#txd");
+cmdline_parse_token_num_t cmd_config_rx_tx_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_rx_tx, value, UINT16);
+
+/* Command table entry: "port config all rxq|txq|rxd|txd <value>". */
+cmdline_parse_inst_t cmd_config_rx_tx = {
+	.f = cmd_config_rx_tx_parsed,
+	.data = NULL,
+	.help_str = "port config all rxq|txq|rxd|txd value",
+	.tokens = {
+		(void *)&cmd_config_rx_tx_port,
+		(void *)&cmd_config_rx_tx_keyword,
+		(void *)&cmd_config_rx_tx_all,
+		(void *)&cmd_config_rx_tx_name,
+		(void *)&cmd_config_rx_tx_value,
+		NULL,
+	},
+};
+
+/* *** config max packet length *** */
+/* Parsed result of "port config all max-pkt-len <value>". */
+struct cmd_config_max_pkt_len_result {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "config" */
+	cmdline_fixed_string_t all;	/* literal "all" */
+	cmdline_fixed_string_t name;	/* literal "max-pkt-len" */
+	uint32_t value;			/* new maximum rx packet length */
+};
+
+/*
+ * Update rx_mode.max_rx_pkt_len for all ports and toggle jumbo-frame
+ * support when the value exceeds ETHER_MAX_LEN.  Requires all ports to
+ * be stopped; no-op if the value is unchanged.
+ */
+static void
+cmd_config_max_pkt_len_parsed(void *parsed_result,
+				__attribute__((unused)) struct cmdline *cl,
+				__attribute__((unused)) void *data)
+{
+	struct cmd_config_max_pkt_len_result *res = parsed_result;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+
+	if (!strcmp(res->name, "max-pkt-len")) {
+		if (res->value < ETHER_MIN_LEN) {
+			printf("max-pkt-len can not be less than %d\n",
+							ETHER_MIN_LEN);
+			return;
+		}
+		if (res->value == rx_mode.max_rx_pkt_len)
+			return;
+
+		rx_mode.max_rx_pkt_len = res->value;
+		if (res->value > ETHER_MAX_LEN)
+			rx_mode.jumbo_frame = 1;
+		else
+			rx_mode.jumbo_frame = 0;
+	} else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	init_port_config();
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_max_pkt_len_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, port,
+								"port");
+cmdline_parse_token_string_t cmd_config_max_pkt_len_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, keyword,
+								"config");
+cmdline_parse_token_string_t cmd_config_max_pkt_len_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, all,
+								"all");
+cmdline_parse_token_string_t cmd_config_max_pkt_len_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, name,
+								"max-pkt-len");
+cmdline_parse_token_num_t cmd_config_max_pkt_len_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_max_pkt_len_result, value,
+								UINT32);
+
+cmdline_parse_inst_t cmd_config_max_pkt_len = {
+	.f = cmd_config_max_pkt_len_parsed,
+	.data = NULL,
+	.help_str = "port config all max-pkt-len value",
+	.tokens = {
+		(void *)&cmd_config_max_pkt_len_port,
+		(void *)&cmd_config_max_pkt_len_keyword,
+		(void *)&cmd_config_max_pkt_len_all,
+		(void *)&cmd_config_max_pkt_len_name,
+		(void *)&cmd_config_max_pkt_len_value,
+		NULL,
+	},
+};
+
+/* *** configure port MTU *** */
+/* Parsed result of "port config mtu <port_id> <value>". */
+struct cmd_config_mtu_result {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "config" */
+	cmdline_fixed_string_t mtu;	/* literal "mtu" */
+	uint8_t port_id;		/* target port id */
+	uint16_t value;			/* new MTU */
+};
+
+/* Set the MTU of one port via port_mtu_set(). */
+static void
+cmd_config_mtu_parsed(void *parsed_result,
+		      __attribute__((unused)) struct cmdline *cl,
+		      __attribute__((unused)) void *data)
+{
+	/* NOTE(review): this rejects MTUs below ETHER_MIN_LEN, which is a
+	 * minimum *frame* length (64) rather than a minimum MTU — confirm
+	 * the intended lower bound. */
+	struct cmd_config_mtu_result *res = parsed_result;
+
+	if (res->value < ETHER_MIN_LEN) {
+		printf("mtu cannot be less than %d\n", ETHER_MIN_LEN);
+		return;
+	}
+	port_mtu_set(res->port_id, res->value);
+}
+
+cmdline_parse_token_string_t cmd_config_mtu_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_mtu_result, port,
+				 "port");
+cmdline_parse_token_string_t cmd_config_mtu_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_mtu_result, keyword,
+				 "config");
+/* Fix: this token previously re-initialized the "keyword" field; bind it
+ * to the dedicated "mtu" field of the result structure instead, so the
+ * "keyword" value is not clobbered and the "mtu" field is populated. */
+cmdline_parse_token_string_t cmd_config_mtu_mtu =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_mtu_result, mtu,
+				 "mtu");
+cmdline_parse_token_num_t cmd_config_mtu_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_mtu_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_config_mtu_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_mtu_result, value, UINT16);
+
+/* Command table entry: "port config mtu <port_id> <value>". */
+cmdline_parse_inst_t cmd_config_mtu = {
+	.f = cmd_config_mtu_parsed,
+	.data = NULL,
+	.help_str = "port config mtu value",
+	.tokens = {
+		(void *)&cmd_config_mtu_port,
+		(void *)&cmd_config_mtu_keyword,
+		(void *)&cmd_config_mtu_mtu,
+		(void *)&cmd_config_mtu_port_id,
+		(void *)&cmd_config_mtu_value,
+		NULL,
+	},
+};
+
+/* *** configure rx mode *** */
+/* Parsed result of "port config all <flag> on|off". */
+struct cmd_config_rx_mode_flag {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "config" */
+	cmdline_fixed_string_t all;	/* literal "all" */
+	cmdline_fixed_string_t name;	/* which rx-mode flag to change */
+	cmdline_fixed_string_t value;	/* "on" or "off" */
+};
+
+/*
+ * Toggle one boolean field of the global rx_mode (or rx_drop_en) and
+ * rebuild the port configuration.  Requires all ports to be stopped.
+ */
+static void
+cmd_config_rx_mode_flag_parsed(void *parsed_result,
+				__attribute__((unused)) struct cmdline *cl,
+				__attribute__((unused)) void *data)
+{
+	struct cmd_config_rx_mode_flag *res = parsed_result;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+
+	if (!strcmp(res->name, "crc-strip")) {
+		if (!strcmp(res->value, "on"))
+			rx_mode.hw_strip_crc = 1;
+		else if (!strcmp(res->value, "off"))
+			rx_mode.hw_strip_crc = 0;
+		else {
+			printf("Unknown parameter\n");
+			return;
+		}
+	} else if (!strcmp(res->name, "rx-cksum")) {
+		if (!strcmp(res->value, "on"))
+			rx_mode.hw_ip_checksum = 1;
+		else if (!strcmp(res->value, "off"))
+			rx_mode.hw_ip_checksum = 0;
+		else {
+			printf("Unknown parameter\n");
+			return;
+		}
+	} else if (!strcmp(res->name, "hw-vlan")) {
+		/* "hw-vlan" drives filter and strip together. */
+		if (!strcmp(res->value, "on")) {
+			rx_mode.hw_vlan_filter = 1;
+			rx_mode.hw_vlan_strip  = 1;
+		}
+		else if (!strcmp(res->value, "off")) {
+			rx_mode.hw_vlan_filter = 0;
+			rx_mode.hw_vlan_strip  = 0;
+		}
+		else {
+			printf("Unknown parameter\n");
+			return;
+		}
+	} else if (!strcmp(res->name, "hw-vlan-filter")) {
+		if (!strcmp(res->value, "on"))
+			rx_mode.hw_vlan_filter = 1;
+		else if (!strcmp(res->value, "off"))
+			rx_mode.hw_vlan_filter = 0;
+		else {
+			printf("Unknown parameter\n");
+			return;
+		}
+	} else if (!strcmp(res->name, "hw-vlan-strip")) {
+		if (!strcmp(res->value, "on"))
+			rx_mode.hw_vlan_strip  = 1;
+		else if (!strcmp(res->value, "off"))
+			rx_mode.hw_vlan_strip  = 0;
+		else {
+			printf("Unknown parameter\n");
+			return;
+		}
+	} else if (!strcmp(res->name, "hw-vlan-extend")) {
+		if (!strcmp(res->value, "on"))
+			rx_mode.hw_vlan_extend = 1;
+		else if (!strcmp(res->value, "off"))
+			rx_mode.hw_vlan_extend = 0;
+		else {
+			printf("Unknown parameter\n");
+			return;
+		}
+	} else if (!strcmp(res->name, "drop-en")) {
+		/* NOTE(review): this branch is only reachable if "drop-en"
+		 * appears in cmd_config_rx_mode_flag_name's match list —
+		 * confirm against the token definition. */
+		if (!strcmp(res->value, "on"))
+			rx_drop_en = 1;
+		else if (!strcmp(res->value, "off"))
+			rx_drop_en = 0;
+		else {
+			printf("Unknown parameter\n");
+			return;
+		}
+	} else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	init_port_config();
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_rx_mode_flag_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, port, "port");
+cmdline_parse_token_string_t cmd_config_rx_mode_flag_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, keyword,
+								"config");
+cmdline_parse_token_string_t cmd_config_rx_mode_flag_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, all, "all");
+/* Fix: add "drop-en" to the match list; the callback already handles it
+ * but the command could never be entered, leaving that branch dead. */
+cmdline_parse_token_string_t cmd_config_rx_mode_flag_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, name,
+					"crc-strip#rx-cksum#hw-vlan#"
+					"hw-vlan-filter#hw-vlan-strip#"
+					"hw-vlan-extend#drop-en");
+cmdline_parse_token_string_t cmd_config_rx_mode_flag_value =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, value,
+							"on#off");
+
+cmdline_parse_inst_t cmd_config_rx_mode_flag = {
+	.f = cmd_config_rx_mode_flag_parsed,
+	.data = NULL,
+	.help_str = "port config all crc-strip|rx-cksum|hw-vlan|"
+		"hw-vlan-filter|hw-vlan-strip|hw-vlan-extend|drop-en on|off",
+	.tokens = {
+		(void *)&cmd_config_rx_mode_flag_port,
+		(void *)&cmd_config_rx_mode_flag_keyword,
+		(void *)&cmd_config_rx_mode_flag_all,
+		(void *)&cmd_config_rx_mode_flag_name,
+		(void *)&cmd_config_rx_mode_flag_value,
+		NULL,
+	},
+};
+
+/* *** configure rss *** */
+/* Parsed result of "port config all rss <type>". */
+struct cmd_config_rss {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	cmdline_fixed_string_t keyword;	/* literal "config" */
+	cmdline_fixed_string_t all;	/* literal "all" */
+	cmdline_fixed_string_t name;	/* literal "rss" */
+	cmdline_fixed_string_t value;	/* hash-function selection */
+};
+
+/*
+ * Map the selected keyword to an ETH_RSS_* hash-function mask and apply
+ * it to every ethdev via rte_eth_dev_rss_hash_update() (default key).
+ */
+static void
+cmd_config_rss_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_config_rss *res = parsed_result;
+	struct rte_eth_rss_conf rss_conf;
+	uint8_t i;
+
+	if (!strcmp(res->value, "all"))
+		rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP |
+				ETH_RSS_UDP | ETH_RSS_SCTP |
+					ETH_RSS_L2_PAYLOAD;
+	else if (!strcmp(res->value, "ip"))
+		rss_conf.rss_hf = ETH_RSS_IP;
+	else if (!strcmp(res->value, "udp"))
+		rss_conf.rss_hf = ETH_RSS_UDP;
+	else if (!strcmp(res->value, "tcp"))
+		rss_conf.rss_hf = ETH_RSS_TCP;
+	else if (!strcmp(res->value, "sctp"))
+		rss_conf.rss_hf = ETH_RSS_SCTP;
+	else if (!strcmp(res->value, "ether"))
+		rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+	else if (!strcmp(res->value, "none"))
+		rss_conf.rss_hf = 0;
+	else {
+		printf("Unknown parameter\n");
+		return;
+	}
+	rss_conf.rss_key = NULL;	/* keep the current/default key */
+	for (i = 0; i < rte_eth_dev_count(); i++)
+		rte_eth_dev_rss_hash_update(i, &rss_conf);
+}
+
+cmdline_parse_token_string_t cmd_config_rss_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss, port, "port");
+cmdline_parse_token_string_t cmd_config_rss_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss, keyword, "config");
+cmdline_parse_token_string_t cmd_config_rss_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss, all, "all");
+cmdline_parse_token_string_t cmd_config_rss_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss, name, "rss");
+cmdline_parse_token_string_t cmd_config_rss_value =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss, value,
+		"all#ip#tcp#udp#sctp#ether#none");
+
+cmdline_parse_inst_t cmd_config_rss = {
+	.f = cmd_config_rss_parsed,
+	.data = NULL,
+	.help_str = "port config all rss all|ip|tcp|udp|sctp|ether|none",
+	.tokens = {
+		(void *)&cmd_config_rss_port,
+		(void *)&cmd_config_rss_keyword,
+		(void *)&cmd_config_rss_all,
+		(void *)&cmd_config_rss_name,
+		(void *)&cmd_config_rss_value,
+		NULL,
+	},
+};
+
+/* *** configure rss hash key *** */
+/* Parsed result of "port config <port_id> rss-hash-key <type> <key>". */
+struct cmd_config_rss_hash_key {
+	cmdline_fixed_string_t port;		/* literal "port" */
+	cmdline_fixed_string_t config;		/* literal "config" */
+	uint8_t port_id;			/* target port id */
+	cmdline_fixed_string_t rss_hash_key;	/* literal "rss-hash-key" */
+	cmdline_fixed_string_t rss_type;	/* flow type the key applies to */
+	cmdline_fixed_string_t key;		/* hex string of the key */
+};
+
+/* Length of an RSS hash key, in bytes. */
+#define RSS_HASH_KEY_LENGTH 40
+
+/* Convert one hex character to its value; 0xFF marks an invalid digit. */
+static uint8_t
+hexa_digit_to_value(char hexa_digit)
+{
+	if ((hexa_digit >= '0') && (hexa_digit <= '9'))
+		return (uint8_t) (hexa_digit - '0');
+	if ((hexa_digit >= 'a') && (hexa_digit <= 'f'))
+		return (uint8_t) ((hexa_digit - 'a') + 10);
+	if ((hexa_digit >= 'A') && (hexa_digit <= 'F'))
+		return (uint8_t) ((hexa_digit - 'A') + 10);
+	/* Invalid hexa digit */
+	return 0xFF;
+}
+
+/* As hexa_digit_to_value() for key[idx], printing a diagnostic that
+ * includes the offending character and position on failure. */
+static uint8_t
+parse_and_check_key_hexa_digit(char *key, int idx)
+{
+	uint8_t hexa_v;
+
+	hexa_v = hexa_digit_to_value(key[idx]);
+	if (hexa_v == 0xFF)
+		printf("invalid key: character %c at position %d is not a "
+		       "valid hexa digit\n", key[idx], idx);
+	return hexa_v;
+}
+
+/*
+ * Decode the 80-character hex key into RSS_HASH_KEY_LENGTH bytes and
+ * install it for the given flow type via port_rss_hash_key_update().
+ * Rejects keys of the wrong length or containing non-hex characters.
+ */
+static void
+cmd_config_rss_hash_key_parsed(void *parsed_result,
+			       __attribute__((unused)) struct cmdline *cl,
+			       __attribute__((unused)) void *data)
+{
+	struct cmd_config_rss_hash_key *res = parsed_result;
+	uint8_t hash_key[RSS_HASH_KEY_LENGTH];
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int i;
+
+	/* Check the length of the RSS hash key */
+	if (strlen(res->key) != (RSS_HASH_KEY_LENGTH * 2)) {
+		/* Message fix: the split literal lacked a separating space
+		 * and printed e.g. "80hexa-decimal". */
+		printf("key length: %d invalid - key must be a string of %d "
+		       "hexa-decimal numbers\n", (int) strlen(res->key),
+		       RSS_HASH_KEY_LENGTH * 2);
+		return;
+	}
+	/* Translate RSS hash key into binary representation */
+	for (i = 0; i < RSS_HASH_KEY_LENGTH; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		hash_key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+	port_rss_hash_key_update(res->port_id, res->rss_type, hash_key,
+				 RSS_HASH_KEY_LENGTH);
+}
+
+cmdline_parse_token_string_t cmd_config_rss_hash_key_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, port, "port");
+cmdline_parse_token_string_t cmd_config_rss_hash_key_config =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, config,
+				 "config");
+cmdline_parse_token_num_t cmd_config_rss_hash_key_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_rss_hash_key, port_id, UINT8);
+cmdline_parse_token_string_t cmd_config_rss_hash_key_rss_hash_key =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key,
+				 rss_hash_key, "rss-hash-key");
+/* Flow types for which a per-type hash key can be programmed. */
+cmdline_parse_token_string_t cmd_config_rss_hash_key_rss_type =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, rss_type,
+				 "ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#"
+				 "ipv4-other#ipv6#ipv6-frag#ipv6-tcp#ipv6-udp#"
+				 "ipv6-sctp#ipv6-other#l2-payload#ipv6-ex#"
+				 "ipv6-tcp-ex#ipv6-udp-ex");
+/* NULL match string: accept any key token (validated in the callback). */
+cmdline_parse_token_string_t cmd_config_rss_hash_key_value =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, key, NULL);
+
+cmdline_parse_inst_t cmd_config_rss_hash_key = {
+	.f = cmd_config_rss_hash_key_parsed,
+	.data = NULL,
+	.help_str =
+		"port config X rss-hash-key ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
+		"ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|"
+		"ipv6-sctp|ipv6-other|l2-payload|"
+		"ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex 80 hexa digits\n",
+	.tokens = {
+		(void *)&cmd_config_rss_hash_key_port,
+		(void *)&cmd_config_rss_hash_key_config,
+		(void *)&cmd_config_rss_hash_key_port_id,
+		(void *)&cmd_config_rss_hash_key_rss_hash_key,
+		(void *)&cmd_config_rss_hash_key_rss_type,
+		(void *)&cmd_config_rss_hash_key_value,
+		NULL,
+	},
+};
+
+/* *** configure port rxq/txq start/stop *** */
+/* Parsed result of "port <port_id> rxq|txq <qid> start|stop". */
+struct cmd_config_rxtx_queue {
+	cmdline_fixed_string_t port;	/* literal "port" */
+	uint8_t portid;			/* target port id */
+	cmdline_fixed_string_t rxtxq;	/* "rxq" or "txq" */
+	uint16_t qid;			/* queue index */
+	cmdline_fixed_string_t opname;	/* "start" or "stop" */
+};
+
+/*
+ * Start or stop one rx/tx queue of a running port.  Forwarding must be
+ * stopped and the port started; prints a message when the PMD does not
+ * implement per-queue start/stop (-ENOTSUP).
+ */
+static void
+cmd_config_rxtx_queue_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_config_rxtx_queue *res = parsed_result;
+	uint8_t isrx;		/* 1 = rx queue, 0 = tx queue */
+	uint8_t isstart;	/* 1 = start, 0 = stop */
+	int ret = 0;
+
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return;
+	}
+
+	if (port_id_is_invalid(res->portid, ENABLED_WARN))
+		return;
+
+	if (port_is_started(res->portid) != 1) {
+		printf("Please start port %u first\n", res->portid);
+		return;
+	}
+
+	if (!strcmp(res->rxtxq, "rxq"))
+		isrx = 1;
+	else if (!strcmp(res->rxtxq, "txq"))
+		isrx = 0;
+	else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	if (isrx && rx_queue_id_is_invalid(res->qid))
+		return;
+	else if (!isrx && tx_queue_id_is_invalid(res->qid))
+		return;
+
+	if (!strcmp(res->opname, "start"))
+		isstart = 1;
+	else if (!strcmp(res->opname, "stop"))
+		isstart = 0;
+	else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	if (isstart && isrx)
+		ret = rte_eth_dev_rx_queue_start(res->portid, res->qid);
+	else if (!isstart && isrx)
+		ret = rte_eth_dev_rx_queue_stop(res->portid, res->qid);
+	else if (isstart && !isrx)
+		ret = rte_eth_dev_tx_queue_start(res->portid, res->qid);
+	else
+		ret = rte_eth_dev_tx_queue_stop(res->portid, res->qid);
+
+	if (ret == -ENOTSUP)
+		printf("Function not supported in PMD driver\n");
+}
+
+cmdline_parse_token_string_t cmd_config_rxtx_queue_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, port, "port");
+cmdline_parse_token_num_t cmd_config_rxtx_queue_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, portid, UINT8);
+cmdline_parse_token_string_t cmd_config_rxtx_queue_rxtxq =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, rxtxq, "rxq#txq");
+cmdline_parse_token_num_t cmd_config_rxtx_queue_qid =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, qid, UINT16);
+cmdline_parse_token_string_t cmd_config_rxtx_queue_opname =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, opname,
+						"start#stop");
+
+/* Command table entry: "port <port_id> rxq|txq <qid> start|stop". */
+cmdline_parse_inst_t cmd_config_rxtx_queue = {
+	.f = cmd_config_rxtx_queue_parsed,
+	.data = NULL,
+	.help_str = "port X rxq|txq ID start|stop",
+	.tokens = {
+		/* Fix: use this command's own "port" token.  The original
+		 * borrowed cmd_config_speed_all_port, a token bound to a
+		 * different result structure, which only worked because
+		 * both structs happen to place "port" at offset 0; the
+		 * local token above was defined but unused. */
+		(void *)&cmd_config_rxtx_queue_port,
+		(void *)&cmd_config_rxtx_queue_portid,
+		(void *)&cmd_config_rxtx_queue_rxtxq,
+		(void *)&cmd_config_rxtx_queue_qid,
+		(void *)&cmd_config_rxtx_queue_opname,
+		NULL,
+	},
+};
+
+/* *** Configure RSS RETA *** */
+/* Parsed result of "port config <port_id> rss reta (hash,queue)[,...]". */
+struct cmd_config_rss_reta {
+	cmdline_fixed_string_t port;		/* literal "port" */
+	cmdline_fixed_string_t keyword;		/* literal "config" */
+	uint8_t port_id;			/* target port id */
+	cmdline_fixed_string_t name;		/* literal "rss" */
+	cmdline_fixed_string_t list_name;	/* literal "reta" */
+	cmdline_fixed_string_t list_of_items;	/* "(hash,queue)" list */
+};
+
+/*
+ * Parse a "(hash_index,queue)[,(hash_index,queue)...]" list into the
+ * rte_eth_rss_reta_entry64 array: for each pair, set the mask bit and
+ * queue of the corresponding 64-entry group.  nb_entries bounds the
+ * accepted hash indexes.  Returns 0 on success, -1 on malformed input.
+ */
+static int
+parse_reta_config(const char *str,
+		  struct rte_eth_rss_reta_entry64 *reta_conf,
+		  uint16_t nb_entries)
+{
+	int i;
+	unsigned size;
+	uint16_t hash_index, idx, shift;
+	uint16_t nb_queue;
+	char s[256];
+	const char *p, *p0 = str;
+	char *end;
+	enum fieldnames {
+		FLD_HASH_INDEX = 0,
+		FLD_QUEUE,
+		_NUM_FLD
+	};
+	unsigned long int_fld[_NUM_FLD];
+	char *str_fld[_NUM_FLD];
+
+	/* Walk each parenthesized "(hash,queue)" group. */
+	while ((p = strchr(p0,'(')) != NULL) {
+		++p;
+		if((p0 = strchr(p,')')) == NULL)
+			return -1;
+
+		size = p0 - p;
+		if(size >= sizeof(s))
+			return -1;
+
+		snprintf(s, sizeof(s), "%.*s", size, p);
+		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
+			return -1;
+		for (i = 0; i < _NUM_FLD; i++) {
+			errno = 0;
+			int_fld[i] = strtoul(str_fld[i], &end, 0);
+			if (errno != 0 || end == str_fld[i] ||
+					int_fld[i] > 65535)
+				return -1;
+		}
+
+		hash_index = (uint16_t)int_fld[FLD_HASH_INDEX];
+		nb_queue = (uint16_t)int_fld[FLD_QUEUE];
+
+		if (hash_index >= nb_entries) {
+			printf("Invalid RETA hash index=%d\n", hash_index);
+			return -1;
+		}
+
+		/* RETA entries are grouped 64 at a time; mark the entry
+		 * as to-be-updated in its group's mask. */
+		idx = hash_index / RTE_RETA_GROUP_SIZE;
+		shift = hash_index % RTE_RETA_GROUP_SIZE;
+		reta_conf[idx].mask |= (1ULL << shift);
+		reta_conf[idx].reta[shift] = nb_queue;
+	}
+
+	return 0;
+}
+
+/*
+ * "port config X rss reta ...": validate the device's RETA size, parse
+ * the (hash,queue) list and program the redirection table through
+ * rte_eth_dev_rss_reta_update().  Supports tables up to 512 entries.
+ */
+static void
+cmd_set_rss_reta_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	int ret;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[8];
+	struct cmd_config_rss_reta *res = parsed_result;
+
+	memset(&dev_info, 0, sizeof(dev_info));
+	rte_eth_dev_info_get(res->port_id, &dev_info);
+	if (dev_info.reta_size == 0) {
+		printf("Redirection table size is 0 which is "
+					"invalid for RSS\n");
+		return;
+	} else
+		printf("The reta size of port %d is %u\n",
+			res->port_id, dev_info.reta_size);
+	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
+		printf("Currently do not support more than %u entries of "
+			"redirection table\n", ETH_RSS_RETA_SIZE_512);
+		return;
+	}
+
+	memset(reta_conf, 0, sizeof(reta_conf));
+	if (!strcmp(res->list_name, "reta")) {
+		if (parse_reta_config(res->list_of_items, reta_conf,
+						dev_info.reta_size)) {
+			printf("Invalid RSS Redirection Table "
+					"config entered\n");
+			return;
+		}
+		ret = rte_eth_dev_rss_reta_update(res->port_id,
+				reta_conf, dev_info.reta_size);
+		if (ret != 0)
+			printf("Bad redirection table parameter, "
+					"return code = %d \n", ret);
+	}
+}
+
+cmdline_parse_token_string_t cmd_config_rss_reta_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, port, "port");
+cmdline_parse_token_string_t cmd_config_rss_reta_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, keyword, "config");
+cmdline_parse_token_num_t cmd_config_rss_reta_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_rss_reta, port_id, UINT8);
+cmdline_parse_token_string_t cmd_config_rss_reta_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, name, "rss");
+cmdline_parse_token_string_t cmd_config_rss_reta_list_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, list_name, "reta");
+/* NULL match string: accept the raw "(hash,queue)" list token. */
+cmdline_parse_token_string_t cmd_config_rss_reta_list_of_items =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, list_of_items,
+					NULL);
+cmdline_parse_inst_t cmd_config_rss_reta = {
+	.f = cmd_set_rss_reta_parsed,
+	.data = NULL,
+	.help_str = "port config X rss reta (hash,queue)[,(hash,queue)]",
+	.tokens = {
+		(void *)&cmd_config_rss_reta_port,
+		(void *)&cmd_config_rss_reta_keyword,
+		(void *)&cmd_config_rss_reta_port_id,
+		(void *)&cmd_config_rss_reta_name,
+		(void *)&cmd_config_rss_reta_list_name,
+		(void *)&cmd_config_rss_reta_list_of_items,
+		NULL,
+	},
+};
+
+/* *** SHOW PORT RETA INFO *** */
+/* Parsed result of "show port <port_id> rss reta <size> (<masks>)". */
+struct cmd_showport_reta {
+	cmdline_fixed_string_t show;		/* literal "show" */
+	cmdline_fixed_string_t port;		/* literal "port" */
+	uint8_t port_id;			/* target port id */
+	cmdline_fixed_string_t rss;		/* literal "rss" */
+	cmdline_fixed_string_t reta;		/* literal "reta" */
+	uint16_t size;				/* expected RETA size */
+	cmdline_fixed_string_t list_of_items;	/* per-group mask list */
+};
+
+/*
+ * Parse "(mask[,mask...])" — one 64-bit entry mask per RETA group of
+ * RTE_RETA_GROUP_SIZE entries — into conf[i].mask.  The number of masks
+ * must equal nb_entries / RTE_RETA_GROUP_SIZE.  Returns 0 on success,
+ * -1 on malformed input.
+ */
+static int
+showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
+			   uint16_t nb_entries,
+			   char *str)
+{
+	uint32_t size;
+	const char *p, *p0 = str;
+	char s[256];
+	char *end;
+	char *str_fld[8];
+	uint16_t i, num = nb_entries / RTE_RETA_GROUP_SIZE;
+	int ret;
+
+	p = strchr(p0, '(');
+	if (p == NULL)
+		return -1;
+	p++;
+	p0 = strchr(p, ')');
+	if (p0 == NULL)
+		return -1;
+	size = p0 - p;
+	if (size >= sizeof(s)) {
+		printf("The string size exceeds the internal buffer size\n");
+		return -1;
+	}
+	snprintf(s, sizeof(s), "%.*s", size, p);
+	ret = rte_strsplit(s, sizeof(s), str_fld, num, ',');
+	if (ret <= 0 || ret != num) {
+		printf("The bits of masks do not match the number of "
+					"reta entries: %u\n", num);
+		return -1;
+	}
+	for (i = 0; i < ret; i++)
+		conf[i].mask = (uint64_t)strtoul(str_fld[i], &end, 0);
+
+	return 0;
+}
+
+/*
+ * Handler for "show port <port_id> rss reta <size> (<mask list>)".
+ * Validates the requested size against the device's actual RETA size,
+ * parses the per-group query masks, then displays the selected entries.
+ */
+static void
+cmd_showport_reta_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_showport_reta *res = parsed_result;
+	/* 8 groups x RTE_RETA_GROUP_SIZE(64) = 512 entries max. */
+	struct rte_eth_rss_reta_entry64 reta_conf[8];
+	struct rte_eth_dev_info dev_info;
+
+	memset(&dev_info, 0, sizeof(dev_info));
+	rte_eth_dev_info_get(res->port_id, &dev_info);
+	/* The user-supplied size must match the device RETA size exactly. */
+	if (dev_info.reta_size == 0 || res->size != dev_info.reta_size ||
+				res->size > ETH_RSS_RETA_SIZE_512) {
+		printf("Invalid redirection table size: %u\n", res->size);
+		return;
+	}
+
+	memset(reta_conf, 0, sizeof(reta_conf));
+	if (showport_parse_reta_config(reta_conf, res->size,
+				res->list_of_items) < 0) {
+		printf("Invalid string: %s for reta masks\n",
+					res->list_of_items);
+		return;
+	}
+	port_rss_reta_info(res->port_id, reta_conf, res->size);
+}
+
+/* Token table and command instance for "show port <id> rss reta". */
+cmdline_parse_token_string_t cmd_showport_reta_show =
+	TOKEN_STRING_INITIALIZER(struct  cmd_showport_reta, show, "show");
+cmdline_parse_token_string_t cmd_showport_reta_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, port, "port");
+cmdline_parse_token_num_t cmd_showport_reta_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_showport_reta, port_id, UINT8);
+cmdline_parse_token_string_t cmd_showport_reta_rss =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, rss, "rss");
+cmdline_parse_token_string_t cmd_showport_reta_reta =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, reta, "reta");
+cmdline_parse_token_num_t cmd_showport_reta_size =
+	TOKEN_NUM_INITIALIZER(struct cmd_showport_reta, size, UINT16);
+/* NULL pattern: free-form mask list, parsed by showport_parse_reta_config. */
+cmdline_parse_token_string_t cmd_showport_reta_list_of_items =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_reta,
+					list_of_items, NULL);
+
+cmdline_parse_inst_t cmd_showport_reta = {
+	.f = cmd_showport_reta_parsed,
+	.data = NULL,
+	.help_str = "show port X rss reta (size) (mask0,mask1,...)",
+	.tokens = {
+		(void *)&cmd_showport_reta_show,
+		(void *)&cmd_showport_reta_port,
+		(void *)&cmd_showport_reta_port_id,
+		(void *)&cmd_showport_reta_rss,
+		(void *)&cmd_showport_reta_reta,
+		(void *)&cmd_showport_reta_size,
+		(void *)&cmd_showport_reta_list_of_items,
+		NULL,
+	},
+};
+
+/* *** Show RSS hash configuration *** */
+/* Parsed result for "show port <id> rss-hash <type> [key]". */
+struct cmd_showport_rss_hash {
+	cmdline_fixed_string_t show;
+	cmdline_fixed_string_t port;
+	uint8_t port_id;
+	cmdline_fixed_string_t rss_hash;
+	cmdline_fixed_string_t rss_type;
+	cmdline_fixed_string_t key; /* optional argument */
+};
+
+/*
+ * Handler for "show port <id> rss-hash ...".  The instance data pointer
+ * (non-NULL only for the "key" command variant) selects whether the RSS
+ * key itself is displayed in addition to the hash configuration.
+ */
+static void cmd_showport_rss_hash_parsed(void *parsed_result,
+				__attribute__((unused)) struct cmdline *cl,
+				void *show_rss_key)
+{
+	struct cmd_showport_rss_hash *res;
+	int key_requested;
+
+	res = parsed_result;
+	key_requested = (show_rss_key != NULL);
+	port_rss_hash_conf_show(res->port_id, res->rss_type, key_requested);
+}
+
+/* Token table shared by the two "show port rss-hash" command variants. */
+cmdline_parse_token_string_t cmd_showport_rss_hash_show =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, show, "show");
+cmdline_parse_token_string_t cmd_showport_rss_hash_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, port, "port");
+cmdline_parse_token_num_t cmd_showport_rss_hash_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_showport_rss_hash, port_id, UINT8);
+cmdline_parse_token_string_t cmd_showport_rss_hash_rss_hash =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, rss_hash,
+				 "rss-hash");
+cmdline_parse_token_string_t cmd_showport_rss_hash_rss_hash_info =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, rss_type,
+				 "ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#"
+				 "ipv4-other#ipv6#ipv6-frag#ipv6-tcp#ipv6-udp#"
+				 "ipv6-sctp#ipv6-other#l2-payload#ipv6-ex#"
+				 "ipv6-tcp-ex#ipv6-udp-ex");
+cmdline_parse_token_string_t cmd_showport_rss_hash_rss_key =
+	TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, key, "key");
+
+/* Variant without the trailing "key" token: shows hash config only. */
+cmdline_parse_inst_t cmd_showport_rss_hash = {
+	.f = cmd_showport_rss_hash_parsed,
+	.data = NULL,
+	.help_str =
+		"show port X rss-hash ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
+		"ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|"
+		"ipv6-sctp|ipv6-other|l2-payload|"
+		"ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex (X = port number)\n",
+	.tokens = {
+		(void *)&cmd_showport_rss_hash_show,
+		(void *)&cmd_showport_rss_hash_port,
+		(void *)&cmd_showport_rss_hash_port_id,
+		(void *)&cmd_showport_rss_hash_rss_hash,
+		(void *)&cmd_showport_rss_hash_rss_hash_info,
+		NULL,
+	},
+};
+
+/* Variant with "key": non-NULL .data makes the handler print the key. */
+cmdline_parse_inst_t cmd_showport_rss_hash_key = {
+	.f = cmd_showport_rss_hash_parsed,
+	.data = (void *)1,
+	.help_str =
+		"show port X rss-hash ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
+		"ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|"
+		"ipv6-sctp|ipv6-other|l2-payload|"
+		"ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex key (X = port number)\n",
+	.tokens = {
+		(void *)&cmd_showport_rss_hash_show,
+		(void *)&cmd_showport_rss_hash_port,
+		(void *)&cmd_showport_rss_hash_port_id,
+		(void *)&cmd_showport_rss_hash_rss_hash,
+		(void *)&cmd_showport_rss_hash_rss_hash_info,
+		(void *)&cmd_showport_rss_hash_rss_key,
+		NULL,
+	},
+};
+
+/* *** Configure DCB *** */
+/* Parsed result for "port config <id> dcb vt on|off <tcs> pfc on|off". */
+struct cmd_config_dcb {
+	cmdline_fixed_string_t port;
+	cmdline_fixed_string_t config;
+	uint8_t port_id;
+	cmdline_fixed_string_t dcb;
+	cmdline_fixed_string_t vt;
+	cmdline_fixed_string_t vt_en;  /* "on"/"off": DCB-in-VT mode */
+	uint8_t num_tcs;               /* number of traffic classes: 4 or 8 */
+	cmdline_fixed_string_t pfc;
+	cmdline_fixed_string_t pfc_en; /* "on"/"off": priority flow control */
+};
+
+/*
+ * Handler for "port config <id> dcb ...": configures the port for DCB
+ * (optionally combined with virtualization) and reinitializes its queues.
+ * The port must be stopped first; only 4 or 8 traffic classes are valid.
+ */
+static void
+cmd_config_dcb_parsed(void *parsed_result,
+                        __attribute__((unused)) struct cmdline *cl,
+                        __attribute__((unused)) void *data)
+{
+	struct cmd_config_dcb *res = parsed_result;
+	portid_t port_id = res->port_id;
+	struct rte_port *port;
+	uint8_t pfc_en;
+	int ret;
+
+	port = &ports[port_id];
+	/** Check if the port is not started **/
+	if (port->port_status != RTE_PORT_STOPPED) {
+		printf("Please stop port %d first\n", port_id);
+		return;
+	}
+
+	if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
+		printf("The invalid number of traffic class,"
+			" only 4 or 8 allowed.\n");
+		return;
+	}
+
+	/* Each TC is forwarded by at least one dedicated lcore. */
+	if (nb_fwd_lcores < res->num_tcs) {
+		printf("nb_cores shouldn't be less than number of TCs.\n");
+		return;
+	}
+	if (!strncmp(res->pfc_en, "on", 2))
+		pfc_en = 1;
+	else
+		pfc_en = 0;
+
+	/* DCB in VT mode */
+	if (!strncmp(res->vt_en, "on", 2))
+		ret = init_port_dcb_config(port_id, DCB_VT_ENABLED,
+				(enum rte_eth_nb_tcs)res->num_tcs,
+				pfc_en);
+	else
+		ret = init_port_dcb_config(port_id, DCB_ENABLED,
+				(enum rte_eth_nb_tcs)res->num_tcs,
+				pfc_en);
+
+
+	if (ret != 0) {
+		printf("Cannot initialize network ports.\n");
+		return;
+	}
+
+	/* Re-setup RX/TX queues with the new DCB configuration. */
+	cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+/* Token table and command instance for "port config <id> dcb ...". */
+cmdline_parse_token_string_t cmd_config_dcb_port =
+        TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, port, "port");
+cmdline_parse_token_string_t cmd_config_dcb_config =
+        TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, config, "config");
+cmdline_parse_token_num_t cmd_config_dcb_port_id =
+        TOKEN_NUM_INITIALIZER(struct cmd_config_dcb, port_id, UINT8);
+cmdline_parse_token_string_t cmd_config_dcb_dcb =
+        TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, dcb, "dcb");
+cmdline_parse_token_string_t cmd_config_dcb_vt =
+        TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, vt, "vt");
+cmdline_parse_token_string_t cmd_config_dcb_vt_en =
+        TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, vt_en, "on#off");
+cmdline_parse_token_num_t cmd_config_dcb_num_tcs =
+        TOKEN_NUM_INITIALIZER(struct cmd_config_dcb, num_tcs, UINT8);
+cmdline_parse_token_string_t cmd_config_dcb_pfc=
+        TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, pfc, "pfc");
+cmdline_parse_token_string_t cmd_config_dcb_pfc_en =
+        TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, pfc_en, "on#off");
+
+cmdline_parse_inst_t cmd_config_dcb = {
+        .f = cmd_config_dcb_parsed,
+        .data = NULL,
+        .help_str = "port config port-id dcb vt on|off nb-tcs pfc on|off",
+        .tokens = {
+		(void *)&cmd_config_dcb_port,
+		(void *)&cmd_config_dcb_config,
+		(void *)&cmd_config_dcb_port_id,
+		(void *)&cmd_config_dcb_dcb,
+		(void *)&cmd_config_dcb_vt,
+		(void *)&cmd_config_dcb_vt_en,
+		(void *)&cmd_config_dcb_num_tcs,
+		(void *)&cmd_config_dcb_pfc,
+		(void *)&cmd_config_dcb_pfc_en,
+		NULL,
+        },
+};
+
+/* *** configure number of packets per burst *** */
+/* Parsed result for "port config all burst <value>". */
+struct cmd_config_burst {
+	cmdline_fixed_string_t port;
+	cmdline_fixed_string_t keyword;
+	cmdline_fixed_string_t all;
+	cmdline_fixed_string_t name;
+	uint16_t value; /* packets per RX/TX burst, 1..MAX_PKT_BURST */
+};
+
+/*
+ * Handler for "port config all burst <value>": sets the global
+ * nb_pkt_per_burst after validating its range.  All ports must be
+ * stopped; queues are reconfigured afterwards.
+ */
+static void
+cmd_config_burst_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_config_burst *res = parsed_result;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+
+	if (!strcmp(res->name, "burst")) {
+		if (res->value < 1 || res->value > MAX_PKT_BURST) {
+			printf("burst must be >= 1 && <= %d\n", MAX_PKT_BURST);
+			return;
+		}
+		nb_pkt_per_burst = res->value;
+	} else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	init_port_config();
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+/* Token table and command instance for "port config all burst <value>". */
+cmdline_parse_token_string_t cmd_config_burst_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_burst, port, "port");
+cmdline_parse_token_string_t cmd_config_burst_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_burst, keyword, "config");
+cmdline_parse_token_string_t cmd_config_burst_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_burst, all, "all");
+cmdline_parse_token_string_t cmd_config_burst_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_burst, name, "burst");
+cmdline_parse_token_num_t cmd_config_burst_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_burst, value, UINT16);
+
+cmdline_parse_inst_t cmd_config_burst = {
+	.f = cmd_config_burst_parsed,
+	.data = NULL,
+	.help_str = "port config all burst value",
+	.tokens = {
+		(void *)&cmd_config_burst_port,
+		(void *)&cmd_config_burst_keyword,
+		(void *)&cmd_config_burst_all,
+		(void *)&cmd_config_burst_name,
+		(void *)&cmd_config_burst_value,
+		NULL,
+	},
+};
+
+/* *** configure RX/TX ring threshold registers (pthresh/hthresh/wthresh) *** */
+/* Parsed result for "port config all txpt|txht|txwt|rxpt|rxht|rxwt <v>". */
+struct cmd_config_thresh {
+	cmdline_fixed_string_t port;
+	cmdline_fixed_string_t keyword;
+	cmdline_fixed_string_t all;
+	cmdline_fixed_string_t name;  /* which threshold register to set */
+	uint8_t value;
+};
+
+/*
+ * Handler: sets one of the global prefetch/host/writeback threshold
+ * values for TX or RX rings, then reconfigures all (stopped) ports.
+ */
+static void
+cmd_config_thresh_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_config_thresh *res = parsed_result;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+
+	if (!strcmp(res->name, "txpt"))
+		tx_pthresh = res->value;
+	else if(!strcmp(res->name, "txht"))
+		tx_hthresh = res->value;
+	else if(!strcmp(res->name, "txwt"))
+		tx_wthresh = res->value;
+	else if(!strcmp(res->name, "rxpt"))
+		rx_pthresh = res->value;
+	else if(!strcmp(res->name, "rxht"))
+		rx_hthresh = res->value;
+	else if(!strcmp(res->name, "rxwt"))
+		rx_wthresh = res->value;
+	else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	init_port_config();
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+/* Token table and command instance for the ring-threshold command. */
+cmdline_parse_token_string_t cmd_config_thresh_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, port, "port");
+cmdline_parse_token_string_t cmd_config_thresh_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, keyword, "config");
+cmdline_parse_token_string_t cmd_config_thresh_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, all, "all");
+cmdline_parse_token_string_t cmd_config_thresh_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, name,
+				"txpt#txht#txwt#rxpt#rxht#rxwt");
+cmdline_parse_token_num_t cmd_config_thresh_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_thresh, value, UINT8);
+
+cmdline_parse_inst_t cmd_config_thresh = {
+	.f = cmd_config_thresh_parsed,
+	.data = NULL,
+	.help_str = "port config all txpt|txht|txwt|rxpt|rxht|rxwt value",
+	.tokens = {
+		(void *)&cmd_config_thresh_port,
+		(void *)&cmd_config_thresh_keyword,
+		(void *)&cmd_config_thresh_all,
+		(void *)&cmd_config_thresh_name,
+		(void *)&cmd_config_thresh_value,
+		NULL,
+	},
+};
+
+/* *** configure free/rs threshold *** */
+/* Parsed result for "port config all txfreet|txrst|rxfreet <value>". */
+struct cmd_config_threshold {
+	cmdline_fixed_string_t port;
+	cmdline_fixed_string_t keyword;
+	cmdline_fixed_string_t all;
+	cmdline_fixed_string_t name;  /* which threshold to set */
+	uint16_t value;
+};
+
+/*
+ * Handler: sets the global TX free / TX RS / RX free threshold, then
+ * reconfigures all (stopped) ports.
+ */
+static void
+cmd_config_threshold_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	struct cmd_config_threshold *res = parsed_result;
+
+	if (!all_ports_stopped()) {
+		printf("Please stop all ports first\n");
+		return;
+	}
+
+	if (!strcmp(res->name, "txfreet"))
+		tx_free_thresh = res->value;
+	else if (!strcmp(res->name, "txrst"))
+		tx_rs_thresh = res->value;
+	else if (!strcmp(res->name, "rxfreet"))
+		rx_free_thresh = res->value;
+	else {
+		printf("Unknown parameter\n");
+		return;
+	}
+
+	init_port_config();
+
+	cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+/* Token table and command instance for the free/RS threshold command. */
+cmdline_parse_token_string_t cmd_config_threshold_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, port, "port");
+cmdline_parse_token_string_t cmd_config_threshold_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, keyword,
+				 "config");
+cmdline_parse_token_string_t cmd_config_threshold_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, all, "all");
+cmdline_parse_token_string_t cmd_config_threshold_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, name,
+				 "txfreet#txrst#rxfreet");
+cmdline_parse_token_num_t cmd_config_threshold_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_config_threshold, value, UINT16);
+
+cmdline_parse_inst_t cmd_config_threshold = {
+	.f = cmd_config_threshold_parsed,
+	.data = NULL,
+	.help_str = "port config all txfreet|txrst|rxfreet value",
+	.tokens = {
+		(void *)&cmd_config_threshold_port,
+		(void *)&cmd_config_threshold_keyword,
+		(void *)&cmd_config_threshold_all,
+		(void *)&cmd_config_threshold_name,
+		(void *)&cmd_config_threshold_value,
+		NULL,
+	},
+};
+
+/* *** stop *** */
+/* Parsed result for the bare "stop" command. */
+struct cmd_stop_result {
+	cmdline_fixed_string_t stop;
+};
+
+/* Handler: stop packet forwarding on all forwarding streams. */
+static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result,
+			    __attribute__((unused)) struct cmdline *cl,
+			    __attribute__((unused)) void *data)
+{
+	stop_packet_forwarding();
+}
+
+cmdline_parse_token_string_t cmd_stop_stop =
+	TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop");
+
+cmdline_parse_inst_t cmd_stop = {
+	.f = cmd_stop_parsed,
+	.data = NULL,
+	.help_str = "stop - stop packet forwarding",
+	.tokens = {
+		(void *)&cmd_stop_stop,
+		NULL,
+	},
+};
+
+/* *** SET CORELIST and PORTLIST CONFIGURATION *** */
+
+/*
+ * Parse a comma-separated list of unsigned decimal values from str into
+ * parsed_items[] (at most max_items entries).  item_name is used only in
+ * error messages.  When check_unique_values is non-zero, duplicated
+ * values are rejected.  Returns the number of items parsed, or 0 on any
+ * error (a message has already been printed).
+ */
+unsigned int
+parse_item_list(char* str, const char* item_name, unsigned int max_items,
+		unsigned int *parsed_items, int check_unique_values)
+{
+	unsigned int nb_item;
+	unsigned int value;
+	unsigned int i;
+	unsigned int j;
+	int value_ok;
+	char c;
+
+	/*
+	 * First parse all items in the list and store their value.
+	 */
+	value = 0;
+	nb_item = 0;
+	value_ok = 0;
+	for (i = 0; i < strnlen(str, STR_TOKEN_SIZE); i++) {
+		c = str[i];
+		if ((c >= '0') && (c <= '9')) {
+			value = (unsigned int) (value * 10 + (c - '0'));
+			value_ok = 1;
+			continue;
+		}
+		if (c != ',') {
+			printf("character %c is not a decimal digit\n", c);
+			return 0;
+		}
+		if (! value_ok) {
+			printf("No valid value before comma\n");
+			return 0;
+		}
+		if (nb_item < max_items) {
+			parsed_items[nb_item] = value;
+			value_ok = 0;
+			value = 0;
+		}
+		nb_item++;
+	}
+	if (nb_item >= max_items) {
+		printf("Number of %s = %u > %u (maximum items)\n",
+		       item_name, nb_item + 1, max_items);
+		return 0;
+	}
+	/*
+	 * Reject an empty list or a trailing comma; otherwise a spurious
+	 * 0 item would silently be appended below.
+	 */
+	if (! value_ok) {
+		printf("No valid value at end of list\n");
+		return 0;
+	}
+	parsed_items[nb_item++] = value;
+	if (! check_unique_values)
+		return nb_item;
+
+	/*
+	 * Then, check that all values in the list are different.
+	 * No optimization here...
+	 */
+	for (i = 0; i < nb_item; i++) {
+		for (j = i + 1; j < nb_item; j++) {
+			if (parsed_items[j] == parsed_items[i]) {
+				printf("duplicated %s %u at index %u and %u\n",
+				       item_name, parsed_items[i], i, j);
+				return 0;
+			}
+		}
+	}
+	return nb_item;
+}
+
+/* Parsed result for "set corelist|portlist <x,y,...>". */
+struct cmd_set_list_result {
+	cmdline_fixed_string_t cmd_keyword;
+	cmdline_fixed_string_t list_name;    /* "corelist" or "portlist" */
+	cmdline_fixed_string_t list_of_items;
+};
+
+/*
+ * Handler: parse the comma-separated list and install it as the set of
+ * forwarding lcores or forwarding ports.  Forwarding must be stopped.
+ */
+static void cmd_set_list_parsed(void *parsed_result,
+				__attribute__((unused)) struct cmdline *cl,
+				__attribute__((unused)) void *data)
+{
+	struct cmd_set_list_result *res;
+	/* Union: only one of the two lists is used per invocation. */
+	union {
+		unsigned int lcorelist[RTE_MAX_LCORE];
+		unsigned int portlist[RTE_MAX_ETHPORTS];
+	} parsed_items;
+	unsigned int nb_item;
+
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return;
+	}
+
+	res = parsed_result;
+	if (!strcmp(res->list_name, "corelist")) {
+		nb_item = parse_item_list(res->list_of_items, "core",
+					  RTE_MAX_LCORE,
+					  parsed_items.lcorelist, 1);
+		if (nb_item > 0)
+			set_fwd_lcores_list(parsed_items.lcorelist, nb_item);
+		return;
+	}
+	if (!strcmp(res->list_name, "portlist")) {
+		nb_item = parse_item_list(res->list_of_items, "port",
+					  RTE_MAX_ETHPORTS,
+					  parsed_items.portlist, 1);
+		if (nb_item > 0)
+			set_fwd_ports_list(parsed_items.portlist, nb_item);
+	}
+}
+
+/* Token table and command instance for "set corelist|portlist ...". */
+cmdline_parse_token_string_t cmd_set_list_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, cmd_keyword,
+				 "set");
+cmdline_parse_token_string_t cmd_set_list_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_name,
+				 "corelist#portlist");
+cmdline_parse_token_string_t cmd_set_list_of_items =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_of_items,
+				 NULL);
+
+cmdline_parse_inst_t cmd_set_fwd_list = {
+	.f = cmd_set_list_parsed,
+	.data = NULL,
+	.help_str = "set corelist|portlist x[,y]*",
+	.tokens = {
+		(void *)&cmd_set_list_keyword,
+		(void *)&cmd_set_list_name,
+		(void *)&cmd_set_list_of_items,
+		NULL,
+	},
+};
+
+/* *** SET COREMASK and PORTMASK CONFIGURATION *** */
+
+/* Parsed result for "set coremask|portmask <hex value>". */
+struct cmd_setmask_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t mask; /* "coremask" or "portmask" */
+	uint64_t hexavalue;
+};
+
+/* Handler: install the forwarding lcore or port bitmask (fwd stopped). */
+static void cmd_set_mask_parsed(void *parsed_result,
+				__attribute__((unused)) struct cmdline *cl,
+				__attribute__((unused)) void *data)
+{
+	struct cmd_setmask_result *res = parsed_result;
+
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return;
+	}
+	if (!strcmp(res->mask, "coremask"))
+		set_fwd_lcores_mask(res->hexavalue);
+	else if (!strcmp(res->mask, "portmask"))
+		set_fwd_ports_mask(res->hexavalue);
+}
+
+cmdline_parse_token_string_t cmd_setmask_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, set, "set");
+cmdline_parse_token_string_t cmd_setmask_mask =
+	TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, mask,
+				 "coremask#portmask");
+cmdline_parse_token_num_t cmd_setmask_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_setmask_result, hexavalue, UINT64);
+
+cmdline_parse_inst_t cmd_set_fwd_mask = {
+	.f = cmd_set_mask_parsed,
+	.data = NULL,
+	.help_str = "set coremask|portmask hexadecimal value",
+	.tokens = {
+		(void *)&cmd_setmask_set,
+		(void *)&cmd_setmask_mask,
+		(void *)&cmd_setmask_value,
+		NULL,
+	},
+};
+
+/*
+ * SET NBPORT, NBCORE, PACKET BURST, and VERBOSE LEVEL CONFIGURATION
+ */
+/* Parsed result for "set nbport|nbcore|burst|verbose <value>". */
+struct cmd_set_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t what;
+	uint16_t value;
+};
+
+/* Handler: dispatch the new value to the matching global setter. */
+static void cmd_set_parsed(void *parsed_result,
+			   __attribute__((unused)) struct cmdline *cl,
+			   __attribute__((unused)) void *data)
+{
+	struct cmd_set_result *res = parsed_result;
+	if (!strcmp(res->what, "nbport"))
+		set_fwd_ports_number(res->value);
+	else if (!strcmp(res->what, "nbcore"))
+		set_fwd_lcores_number(res->value);
+	else if (!strcmp(res->what, "burst"))
+		set_nb_pkt_per_burst(res->value);
+	else if (!strcmp(res->what, "verbose"))
+		set_verbose_level(res->value);
+}
+
+cmdline_parse_token_string_t cmd_set_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_result, set, "set");
+cmdline_parse_token_string_t cmd_set_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_result, what,
+				 "nbport#nbcore#burst#verbose");
+cmdline_parse_token_num_t cmd_set_value =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_result, value, UINT16);
+
+cmdline_parse_inst_t cmd_set_numbers = {
+	.f = cmd_set_parsed,
+	.data = NULL,
+	.help_str = "set nbport|nbcore|burst|verbose value",
+	.tokens = {
+		(void *)&cmd_set_set,
+		(void *)&cmd_set_what,
+		(void *)&cmd_set_value,
+		NULL,
+	},
+};
+
+/* *** SET SEGMENT LENGTHS OF TXONLY PACKETS *** */
+
+/* Parsed result for "set txpkts <len0,len1,...>". */
+struct cmd_set_txpkts_result {
+	cmdline_fixed_string_t cmd_keyword;
+	cmdline_fixed_string_t txpkts;
+	cmdline_fixed_string_t seg_lengths; /* comma-separated lengths */
+};
+
+/*
+ * Handler: parse the per-segment length list (duplicates allowed) and
+ * apply it to the txonly packet layout.
+ */
+static void
+cmd_set_txpkts_parsed(void *parsed_result,
+		      __attribute__((unused)) struct cmdline *cl,
+		      __attribute__((unused)) void *data)
+{
+	struct cmd_set_txpkts_result *res;
+	unsigned seg_lengths[RTE_MAX_SEGS_PER_PKT];
+	unsigned int nb_segs;
+
+	res = parsed_result;
+	nb_segs = parse_item_list(res->seg_lengths, "segment lengths",
+				  RTE_MAX_SEGS_PER_PKT, seg_lengths, 0);
+	if (nb_segs > 0)
+		set_tx_pkt_segments(seg_lengths, nb_segs);
+}
+
+cmdline_parse_token_string_t cmd_set_txpkts_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result,
+				 cmd_keyword, "set");
+cmdline_parse_token_string_t cmd_set_txpkts_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result,
+				 txpkts, "txpkts");
+cmdline_parse_token_string_t cmd_set_txpkts_lengths =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result,
+				 seg_lengths, NULL);
+
+cmdline_parse_inst_t cmd_set_txpkts = {
+	.f = cmd_set_txpkts_parsed,
+	.data = NULL,
+	.help_str = "set txpkts x[,y]*",
+	.tokens = {
+		(void *)&cmd_set_txpkts_keyword,
+		(void *)&cmd_set_txpkts_name,
+		(void *)&cmd_set_txpkts_lengths,
+		NULL,
+	},
+};
+
+/* *** SET COPY AND SPLIT POLICY ON TX PACKETS *** */
+
+/* Parsed result for "set txsplit on|off|rand". */
+struct cmd_set_txsplit_result {
+	cmdline_fixed_string_t cmd_keyword;
+	cmdline_fixed_string_t txsplit;
+	cmdline_fixed_string_t mode;
+};
+
+/* Handler: forward the mode string to set_tx_pkt_split() for validation. */
+static void
+cmd_set_txsplit_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_set_txsplit_result *res;
+
+	res = parsed_result;
+	set_tx_pkt_split(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_set_txsplit_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+				 cmd_keyword, "set");
+cmdline_parse_token_string_t cmd_set_txsplit_name =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+				 txsplit, "txsplit");
+cmdline_parse_token_string_t cmd_set_txsplit_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+				 mode, NULL);
+
+cmdline_parse_inst_t cmd_set_txsplit = {
+	.f = cmd_set_txsplit_parsed,
+	.data = NULL,
+	.help_str = "set txsplit on|off|rand",
+	.tokens = {
+		(void *)&cmd_set_txsplit_keyword,
+		(void *)&cmd_set_txsplit_name,
+		(void *)&cmd_set_txsplit_mode,
+		NULL,
+	},
+};
+
+/* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */
+/* Parsed result for "rx_vlan add|rm all <port_id>". */
+struct cmd_rx_vlan_filter_all_result {
+	cmdline_fixed_string_t rx_vlan;
+	cmdline_fixed_string_t what; /* "add" or "rm" */
+	cmdline_fixed_string_t all;
+	uint8_t port_id;
+};
+
+/* Handler: enable/disable all VLAN IDs in the port's RX VLAN filter. */
+static void
+cmd_rx_vlan_filter_all_parsed(void *parsed_result,
+			      __attribute__((unused)) struct cmdline *cl,
+			      __attribute__((unused)) void *data)
+{
+	struct cmd_rx_vlan_filter_all_result *res = parsed_result;
+
+	if (!strcmp(res->what, "add"))
+		rx_vlan_all_filter_set(res->port_id, 1);
+	else
+		rx_vlan_all_filter_set(res->port_id, 0);
+}
+
+cmdline_parse_token_string_t cmd_rx_vlan_filter_all_rx_vlan =
+	TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+				 rx_vlan, "rx_vlan");
+cmdline_parse_token_string_t cmd_rx_vlan_filter_all_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+				 what, "add#rm");
+cmdline_parse_token_string_t cmd_rx_vlan_filter_all_all =
+	TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+				 all, "all");
+cmdline_parse_token_num_t cmd_rx_vlan_filter_all_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+			      port_id, UINT8);
+
+cmdline_parse_inst_t cmd_rx_vlan_filter_all = {
+	.f = cmd_rx_vlan_filter_all_parsed,
+	.data = NULL,
+	.help_str = "add/remove all identifiers to/from the set of VLAN "
+		    "Identifiers filtered by a port",
+	.tokens = {
+		(void *)&cmd_rx_vlan_filter_all_rx_vlan,
+		(void *)&cmd_rx_vlan_filter_all_what,
+		(void *)&cmd_rx_vlan_filter_all_all,
+		(void *)&cmd_rx_vlan_filter_all_portid,
+		NULL,
+	},
+};
+
+/* *** VLAN OFFLOAD SET ON A PORT *** */
+/* Parsed result for "vlan set strip|filter|qinq|stripq on|off <port[,q]>". */
+struct cmd_vlan_offload_result {
+	cmdline_fixed_string_t vlan;
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t vlan_type;
+	cmdline_fixed_string_t what;
+	cmdline_fixed_string_t on;
+	/* Taken as a string: may be "port_id" or "port_id,queue_id". */
+	cmdline_fixed_string_t port_id;
+};
+
+/*
+ * Handler for "vlan set strip|filter|qinq|stripq on|off <port[,queue]>".
+ * The last token is split manually at the comma: the part before it is
+ * the port id, the optional part after it is the queue id (stripq only).
+ */
+static void
+cmd_vlan_offload_parsed(void *parsed_result,
+			__attribute__((unused)) struct cmdline *cl,
+			__attribute__((unused)) void *data)
+{
+	int on;
+	struct cmd_vlan_offload_result *res = parsed_result;
+	char *str;
+	int i, len = 0;
+	portid_t port_id = 0;
+	unsigned int tmp;
+
+	str = res->port_id;
+	len = strnlen(str, STR_TOKEN_SIZE);
+	i = 0;
+	/* Get port_id first */
+	while(i < len){
+		if(str[i] == ',')
+			break;
+
+		i++;
+	}
+	/* Terminate the port_id part in place so strtoul stops there. */
+	str[i]='\0';
+	tmp = strtoul(str, NULL, 0);
+	/* If port_id greater that what portid_t can represent, return */
+	if(tmp >= RTE_MAX_ETHPORTS)
+		return;
+	port_id = (portid_t)tmp;
+
+	if (!strcmp(res->on, "on"))
+		on = 1;
+	else
+		on = 0;
+
+	if (!strcmp(res->what, "strip"))
+		rx_vlan_strip_set(port_id, on);
+	else if(!strcmp(res->what, "stripq")){
+		uint16_t queue_id = 0;
+
+		/* No queue_id, return */
+		if(i + 1 >= len) {
+			printf("must specify (port,queue_id)\n");
+			return;
+		}
+		tmp = strtoul(str + i + 1, NULL, 0);
+		/* If queue_id greater that what 16-bits can represent, return */
+		if(tmp > 0xffff)
+			return;
+
+		queue_id = (uint16_t)tmp;
+		rx_vlan_strip_set_on_queue(port_id, queue_id, on);
+	}
+	else if (!strcmp(res->what, "filter"))
+		rx_vlan_filter_set(port_id, on);
+	else
+		/* Remaining token choice is "qinq": extended VLAN mode. */
+		vlan_extend_set(port_id, on);
+
+	return;
+}
+
+/* Token table and command instance for "vlan set <what> on|off ...". */
+cmdline_parse_token_string_t cmd_vlan_offload_vlan =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result,
+				 vlan, "vlan");
+cmdline_parse_token_string_t cmd_vlan_offload_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result,
+				 set, "set");
+cmdline_parse_token_string_t cmd_vlan_offload_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result,
+				 what, "strip#filter#qinq#stripq");
+cmdline_parse_token_string_t cmd_vlan_offload_on =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result,
+			      on, "on#off");
+cmdline_parse_token_string_t cmd_vlan_offload_portid =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result,
+			      port_id, NULL);
+
+cmdline_parse_inst_t cmd_vlan_offload = {
+	.f = cmd_vlan_offload_parsed,
+	.data = NULL,
+	.help_str = "set strip|filter|qinq|stripq on|off port_id[,queue_id], filter/strip for rx side"
+		" qinq(extended) for both rx/tx sides ",
+	.tokens = {
+		(void *)&cmd_vlan_offload_vlan,
+		(void *)&cmd_vlan_offload_set,
+		(void *)&cmd_vlan_offload_what,
+		(void *)&cmd_vlan_offload_on,
+		(void *)&cmd_vlan_offload_portid,
+		NULL,
+	},
+};
+
+/* *** VLAN TPID SET ON A PORT *** */
+/* Parsed result for "vlan set inner|outer tpid <tp_id> <port_id>". */
+struct cmd_vlan_tpid_result {
+	cmdline_fixed_string_t vlan;
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t vlan_type; /* "inner" or "outer" */
+	cmdline_fixed_string_t what;
+	uint16_t tp_id;                   /* Tag Protocol Identifier */
+	uint8_t port_id;
+};
+
+/* Handler: map the type token to rte_vlan_type and set the port TPID. */
+static void
+cmd_vlan_tpid_parsed(void *parsed_result,
+		     __attribute__((unused)) struct cmdline *cl,
+		     __attribute__((unused)) void *data)
+{
+	struct cmd_vlan_tpid_result *res = parsed_result;
+	enum rte_vlan_type vlan_type;
+
+	if (!strcmp(res->vlan_type, "inner"))
+		vlan_type = ETH_VLAN_TYPE_INNER;
+	else if (!strcmp(res->vlan_type, "outer"))
+		vlan_type = ETH_VLAN_TYPE_OUTER;
+	else {
+		printf("Unknown vlan type\n");
+		return;
+	}
+	vlan_tpid_set(res->port_id, vlan_type, res->tp_id);
+}
+
+cmdline_parse_token_string_t cmd_vlan_tpid_vlan =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result,
+				 vlan, "vlan");
+cmdline_parse_token_string_t cmd_vlan_tpid_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result,
+				 set, "set");
+cmdline_parse_token_string_t cmd_vlan_type =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result,
+				 vlan_type, "inner#outer");
+cmdline_parse_token_string_t cmd_vlan_tpid_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result,
+				 what, "tpid");
+cmdline_parse_token_num_t cmd_vlan_tpid_tpid =
+	TOKEN_NUM_INITIALIZER(struct cmd_vlan_tpid_result,
+			      tp_id, UINT16);
+cmdline_parse_token_num_t cmd_vlan_tpid_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_vlan_tpid_result,
+			      port_id, UINT8);
+
+cmdline_parse_inst_t cmd_vlan_tpid = {
+	.f = cmd_vlan_tpid_parsed,
+	.data = NULL,
+	.help_str = "set inner|outer tpid tp_id port_id, set the VLAN "
+		    "Ether type",
+	.tokens = {
+		(void *)&cmd_vlan_tpid_vlan,
+		(void *)&cmd_vlan_tpid_set,
+		(void *)&cmd_vlan_type,
+		(void *)&cmd_vlan_tpid_what,
+		(void *)&cmd_vlan_tpid_tpid,
+		(void *)&cmd_vlan_tpid_portid,
+		NULL,
+	},
+};
+
+/* *** ADD/REMOVE A VLAN IDENTIFIER TO/FROM A PORT VLAN RX FILTER *** */
+/* Parsed result for "rx_vlan add|rm <vlan_id> <port_id>". */
+struct cmd_rx_vlan_filter_result {
+	cmdline_fixed_string_t rx_vlan;
+	cmdline_fixed_string_t what; /* "add" or "rm" */
+	uint16_t vlan_id;
+	uint8_t port_id;
+};
+
+/* Handler: add or remove one VLAN ID in the port's RX VLAN filter table. */
+static void
+cmd_rx_vlan_filter_parsed(void *parsed_result,
+			  __attribute__((unused)) struct cmdline *cl,
+			  __attribute__((unused)) void *data)
+{
+	struct cmd_rx_vlan_filter_result *res = parsed_result;
+
+	if (!strcmp(res->what, "add"))
+		rx_vft_set(res->port_id, res->vlan_id, 1);
+	else
+		rx_vft_set(res->port_id, res->vlan_id, 0);
+}
+
+cmdline_parse_token_string_t cmd_rx_vlan_filter_rx_vlan =
+	TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result,
+				 rx_vlan, "rx_vlan");
+cmdline_parse_token_string_t cmd_rx_vlan_filter_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result,
+				 what, "add#rm");
+cmdline_parse_token_num_t cmd_rx_vlan_filter_vlanid =
+	TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result,
+			      vlan_id, UINT16);
+cmdline_parse_token_num_t cmd_rx_vlan_filter_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result,
+			      port_id, UINT8);
+
+cmdline_parse_inst_t cmd_rx_vlan_filter = {
+	.f = cmd_rx_vlan_filter_parsed,
+	.data = NULL,
+	.help_str = "add/remove a VLAN identifier to/from the set of VLAN "
+		    "Identifiers filtered by a port",
+	.tokens = {
+		(void *)&cmd_rx_vlan_filter_rx_vlan,
+		(void *)&cmd_rx_vlan_filter_what,
+		(void *)&cmd_rx_vlan_filter_vlanid,
+		(void *)&cmd_rx_vlan_filter_portid,
+		NULL,
+	},
+};
+
+/* *** ENABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */
+/* Parsed result for "tx_vlan set <port_id> <vlan_id>". */
+struct cmd_tx_vlan_set_result {
+	cmdline_fixed_string_t tx_vlan;
+	cmdline_fixed_string_t set;
+	uint8_t port_id;
+	uint16_t vlan_id;
+};
+
+/* Handler: enable HW insertion of a single VLAN tag on TX for a port. */
+static void
+cmd_tx_vlan_set_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_tx_vlan_set_result *res = parsed_result;
+
+	tx_vlan_set(res->port_id, res->vlan_id);
+}
+
+cmdline_parse_token_string_t cmd_tx_vlan_set_tx_vlan =
+	TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result,
+				 tx_vlan, "tx_vlan");
+cmdline_parse_token_string_t cmd_tx_vlan_set_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result,
+				 set, "set");
+cmdline_parse_token_num_t cmd_tx_vlan_set_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result,
+			      port_id, UINT8);
+cmdline_parse_token_num_t cmd_tx_vlan_set_vlanid =
+	TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result,
+			      vlan_id, UINT16);
+
+cmdline_parse_inst_t cmd_tx_vlan_set = {
+	.f = cmd_tx_vlan_set_parsed,
+	.data = NULL,
+	.help_str = "enable hardware insertion of a single VLAN header "
+		    "with a given TAG Identifier in packets sent on a port",
+	.tokens = {
+		(void *)&cmd_tx_vlan_set_tx_vlan,
+		(void *)&cmd_tx_vlan_set_set,
+		(void *)&cmd_tx_vlan_set_portid,
+		(void *)&cmd_tx_vlan_set_vlanid,
+		NULL,
+	},
+};
+
+/* *** ENABLE HARDWARE INSERTION OF Double VLAN HEADER IN TX PACKETS *** */
+/* Parsed fields for: tx_vlan set <port_id> <vlan_id> <vlan_id_outer> */
+struct cmd_tx_vlan_set_qinq_result {
+ cmdline_fixed_string_t tx_vlan;
+ cmdline_fixed_string_t set;
+ uint8_t port_id;
+ uint16_t vlan_id;
+ uint16_t vlan_id_outer;
+};
+
+/* Handler: delegate to tx_qinq_set() to enable HW double-VLAN (QinQ)
+ * insertion with inner and outer tags. */
+static void
+cmd_tx_vlan_set_qinq_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tx_vlan_set_qinq_result *res = parsed_result;
+
+ tx_qinq_set(res->port_id, res->vlan_id, res->vlan_id_outer);
+}
+
+cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_tx_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+ tx_vlan, "tx_vlan");
+cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+ set, "set");
+cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid =
+ TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+ vlan_id, UINT16);
+cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid_outer =
+ TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
+ vlan_id_outer, UINT16);
+
+/* Command registration: tx_vlan set <port_id> <vlan_id> <outer_vlan_id> */
+cmdline_parse_inst_t cmd_tx_vlan_set_qinq = {
+ .f = cmd_tx_vlan_set_qinq_parsed,
+ .data = NULL,
+ .help_str = "enable hardware insertion of double VLAN header "
+ "with given TAG Identifiers in packets sent on a port",
+ .tokens = {
+ (void *)&cmd_tx_vlan_set_qinq_tx_vlan,
+ (void *)&cmd_tx_vlan_set_qinq_set,
+ (void *)&cmd_tx_vlan_set_qinq_portid,
+ (void *)&cmd_tx_vlan_set_qinq_vlanid,
+ (void *)&cmd_tx_vlan_set_qinq_vlanid_outer,
+ NULL,
+ },
+};
+
+/* *** ENABLE/DISABLE PORT BASED TX VLAN INSERTION *** */
+/* Parsed fields for: tx_vlan set pvid <port_id> <vlan_id> on|off */
+struct cmd_tx_vlan_set_pvid_result {
+ cmdline_fixed_string_t tx_vlan;
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t pvid;
+ uint8_t port_id;
+ uint16_t vlan_id;
+ cmdline_fixed_string_t mode;
+};
+
+/* Handler: enable (mode "on") or disable port-based TX VLAN insertion
+ * via tx_vlan_pvid_set(). */
+static void
+cmd_tx_vlan_set_pvid_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tx_vlan_set_pvid_result *res = parsed_result;
+
+ if (strcmp(res->mode, "on") == 0)
+ tx_vlan_pvid_set(res->port_id, res->vlan_id, 1);
+ else
+ tx_vlan_pvid_set(res->port_id, res->vlan_id, 0);
+}
+
+cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_tx_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
+ tx_vlan, "tx_vlan");
+cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_pvid =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
+ pvid, "pvid");
+cmdline_parse_token_num_t cmd_tx_vlan_set_pvid_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_tx_vlan_set_pvid_vlan_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
+ vlan_id, UINT16);
+cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
+ mode, "on#off");
+
+/* Command registration: tx_vlan set pvid <port_id> <vlan_id> on|off */
+cmdline_parse_inst_t cmd_tx_vlan_set_pvid = {
+ .f = cmd_tx_vlan_set_pvid_parsed,
+ .data = NULL,
+ .help_str = "tx_vlan set pvid port_id vlan_id (on|off)",
+ .tokens = {
+ (void *)&cmd_tx_vlan_set_pvid_tx_vlan,
+ (void *)&cmd_tx_vlan_set_pvid_set,
+ (void *)&cmd_tx_vlan_set_pvid_pvid,
+ (void *)&cmd_tx_vlan_set_pvid_port_id,
+ (void *)&cmd_tx_vlan_set_pvid_vlan_id,
+ (void *)&cmd_tx_vlan_set_pvid_mode,
+ NULL,
+ },
+};
+
+/* *** DISABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */
+/* Parsed fields for: tx_vlan reset <port_id> */
+struct cmd_tx_vlan_reset_result {
+ cmdline_fixed_string_t tx_vlan;
+ cmdline_fixed_string_t reset;
+ uint8_t port_id;
+};
+
+/* Handler: delegate to tx_vlan_reset() to disable HW VLAN insertion. */
+static void
+cmd_tx_vlan_reset_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tx_vlan_reset_result *res = parsed_result;
+
+ tx_vlan_reset(res->port_id);
+}
+
+cmdline_parse_token_string_t cmd_tx_vlan_reset_tx_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result,
+ tx_vlan, "tx_vlan");
+cmdline_parse_token_string_t cmd_tx_vlan_reset_reset =
+ TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result,
+ reset, "reset");
+cmdline_parse_token_num_t cmd_tx_vlan_reset_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_reset_result,
+ port_id, UINT8);
+
+/* Command registration: tx_vlan reset <port_id> */
+cmdline_parse_inst_t cmd_tx_vlan_reset = {
+ .f = cmd_tx_vlan_reset_parsed,
+ .data = NULL,
+ .help_str = "disable hardware insertion of a VLAN header in packets "
+ "sent on a port",
+ .tokens = {
+ (void *)&cmd_tx_vlan_reset_tx_vlan,
+ (void *)&cmd_tx_vlan_reset_reset,
+ (void *)&cmd_tx_vlan_reset_portid,
+ NULL,
+ },
+};
+
+
+/* *** ENABLE HARDWARE INSERTION OF CHECKSUM IN TX PACKETS *** */
+/* Parsed fields shared by "csum set ..." and "csum show ..." commands. */
+struct cmd_csum_result {
+ cmdline_fixed_string_t csum;
+ cmdline_fixed_string_t mode;
+ cmdline_fixed_string_t proto;
+ cmdline_fixed_string_t hwsw;
+ uint8_t port_id;
+};
+
+/*
+ * Print the current checksum-offload configuration of a port (from the
+ * per-port tx_ol_flags bitmask), then emit a warning for every offload
+ * that is enabled in software state but not advertised in the device's
+ * tx_offload_capa capabilities.
+ */
+static void
+csum_show(int port_id)
+{
+ struct rte_eth_dev_info dev_info;
+ uint16_t ol_flags;
+
+ ol_flags = ports[port_id].tx_ol_flags;
+ printf("Parse tunnel is %s\n",
+ (ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) ? "on" : "off");
+ printf("IP checksum offload is %s\n",
+ (ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) ? "hw" : "sw");
+ printf("UDP checksum offload is %s\n",
+ (ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
+ printf("TCP checksum offload is %s\n",
+ (ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
+ printf("SCTP checksum offload is %s\n",
+ (ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
+ printf("Outer-Ip checksum offload is %s\n",
+ (ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ? "hw" : "sw");
+
+ /* display warnings if configuration is not supported by the NIC */
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if ((ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) &&
+ (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+ printf("Warning: hardware IP checksum enabled but not "
+ "supported by port %d\n", port_id);
+ }
+ if ((ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) &&
+ (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
+ printf("Warning: hardware UDP checksum enabled but not "
+ "supported by port %d\n", port_id);
+ }
+ if ((ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) &&
+ (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
+ printf("Warning: hardware TCP checksum enabled but not "
+ "supported by port %d\n", port_id);
+ }
+ if ((ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
+ (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
+ printf("Warning: hardware SCTP checksum enabled but not "
+ "supported by port %d\n", port_id);
+ }
+ if ((ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) &&
+ (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
+ printf("Warning: hardware outer IP checksum enabled but not "
+ "supported by port %d\n", port_id);
+ }
+}
+
+/*
+ * Shared handler for "csum set <proto> hw|sw <port>" and "csum show <port>".
+ * In "set" mode it toggles the selected protocol's offload bit in the
+ * per-port tx_ol_flags; in both modes it ends by printing the resulting
+ * configuration via csum_show().
+ */
+static void
+cmd_csum_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_csum_result *res = parsed_result;
+ int hw = 0;
+ uint16_t mask = 0;
+
+ /* NOTE(review): with ENABLED_WARN, port_id_is_invalid() presumably
+ * already prints its own diagnostic, making this printf a duplicate
+ * message — confirm against its definition elsewhere in testpmd. */
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN)) {
+ printf("invalid port %d\n", res->port_id);
+ return;
+ }
+
+ if (!strcmp(res->mode, "set")) {
+
+ if (!strcmp(res->hwsw, "hw"))
+ hw = 1;
+
+ /* Map the protocol keyword to its offload-flag bit; an
+ * unrecognized keyword leaves mask == 0 (no-op). */
+ if (!strcmp(res->proto, "ip")) {
+ mask = TESTPMD_TX_OFFLOAD_IP_CKSUM;
+ } else if (!strcmp(res->proto, "udp")) {
+ mask = TESTPMD_TX_OFFLOAD_UDP_CKSUM;
+ } else if (!strcmp(res->proto, "tcp")) {
+ mask = TESTPMD_TX_OFFLOAD_TCP_CKSUM;
+ } else if (!strcmp(res->proto, "sctp")) {
+ mask = TESTPMD_TX_OFFLOAD_SCTP_CKSUM;
+ } else if (!strcmp(res->proto, "outer-ip")) {
+ mask = TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM;
+ }
+
+ if (hw)
+ ports[res->port_id].tx_ol_flags |= mask;
+ else
+ ports[res->port_id].tx_ol_flags &= (~mask);
+ }
+ csum_show(res->port_id);
+}
+
+/* Tokens shared by the csum "set" and "show" command variants. */
+cmdline_parse_token_string_t cmd_csum_csum =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_result,
+ csum, "csum");
+cmdline_parse_token_string_t cmd_csum_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_result,
+ mode, "set");
+cmdline_parse_token_string_t cmd_csum_proto =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_result,
+ proto, "ip#tcp#udp#sctp#outer-ip");
+cmdline_parse_token_string_t cmd_csum_hwsw =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_result,
+ hwsw, "hw#sw");
+cmdline_parse_token_num_t cmd_csum_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_csum_result,
+ port_id, UINT8);
+
+/* Command registration: csum set ip|tcp|udp|sctp|outer-ip hw|sw <port> */
+cmdline_parse_inst_t cmd_csum_set = {
+ .f = cmd_csum_parsed,
+ .data = NULL,
+ .help_str = "enable/disable hardware calculation of L3/L4 checksum when "
+ "using csum forward engine: csum set ip|tcp|udp|sctp|outer-ip hw|sw <port>",
+ .tokens = {
+ (void *)&cmd_csum_csum,
+ (void *)&cmd_csum_mode,
+ (void *)&cmd_csum_proto,
+ (void *)&cmd_csum_hwsw,
+ (void *)&cmd_csum_portid,
+ NULL,
+ },
+};
+
+/* "show" variant reuses the mode field with a different fixed string. */
+cmdline_parse_token_string_t cmd_csum_mode_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_result,
+ mode, "show");
+
+/* Command registration: csum show <port> */
+cmdline_parse_inst_t cmd_csum_show = {
+ .f = cmd_csum_parsed,
+ .data = NULL,
+ .help_str = "show checksum offload configuration: csum show <port>",
+ .tokens = {
+ (void *)&cmd_csum_csum,
+ (void *)&cmd_csum_mode_show,
+ (void *)&cmd_csum_portid,
+ NULL,
+ },
+};
+
+/* Enable/disable tunnel parsing */
+/* Parsed fields for: csum parse_tunnel on|off <port_id> */
+struct cmd_csum_tunnel_result {
+ cmdline_fixed_string_t csum;
+ cmdline_fixed_string_t parse;
+ cmdline_fixed_string_t onoff;
+ uint8_t port_id;
+};
+
+/* Handler: toggle TESTPMD_TX_OFFLOAD_PARSE_TUNNEL in the per-port
+ * tx_ol_flags, then print the resulting csum configuration. */
+static void
+cmd_csum_tunnel_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_csum_tunnel_result *res = parsed_result;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ if (!strcmp(res->onoff, "on"))
+ ports[res->port_id].tx_ol_flags |=
+ TESTPMD_TX_OFFLOAD_PARSE_TUNNEL;
+ else
+ ports[res->port_id].tx_ol_flags &=
+ (~TESTPMD_TX_OFFLOAD_PARSE_TUNNEL);
+
+ csum_show(res->port_id);
+}
+
+cmdline_parse_token_string_t cmd_csum_tunnel_csum =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result,
+ csum, "csum");
+cmdline_parse_token_string_t cmd_csum_tunnel_parse =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result,
+ parse, "parse_tunnel");
+cmdline_parse_token_string_t cmd_csum_tunnel_onoff =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result,
+ onoff, "on#off");
+cmdline_parse_token_num_t cmd_csum_tunnel_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_csum_tunnel_result,
+ port_id, UINT8);
+
+/* Command registration: csum parse_tunnel on|off <tx-port> */
+cmdline_parse_inst_t cmd_csum_tunnel = {
+ .f = cmd_csum_tunnel_parsed,
+ .data = NULL,
+ .help_str = "enable/disable parsing of tunnels for csum engine: "
+ "csum parse_tunnel on|off <tx-port>",
+ .tokens = {
+ (void *)&cmd_csum_tunnel_csum,
+ (void *)&cmd_csum_tunnel_parse,
+ (void *)&cmd_csum_tunnel_onoff,
+ (void *)&cmd_csum_tunnel_portid,
+ NULL,
+ },
+};
+
+/* *** ENABLE HARDWARE SEGMENTATION IN TX PACKETS *** */
+/* Parsed fields shared by "tso set ..." and "tso show ..." commands. */
+struct cmd_tso_set_result {
+ cmdline_fixed_string_t tso;
+ cmdline_fixed_string_t mode;
+ uint16_t tso_segsz;
+ uint8_t port_id;
+};
+
+/*
+ * Shared handler for "tso set <segsz> <port>" and "tso show <port>".
+ * In "set" mode the per-port tso_segsz is updated (0 disables TSO);
+ * both modes then report the current value and warn when TSO is
+ * enabled but the device does not advertise DEV_TX_OFFLOAD_TCP_TSO.
+ */
+static void
+cmd_tso_set_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tso_set_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ if (!strcmp(res->mode, "set"))
+ ports[res->port_id].tso_segsz = res->tso_segsz;
+
+ if (ports[res->port_id].tso_segsz == 0)
+ printf("TSO is disabled\n");
+ else
+ printf("TSO segment size is %d\n",
+ ports[res->port_id].tso_segsz);
+
+ /* display warnings if configuration is not supported by the NIC */
+ rte_eth_dev_info_get(res->port_id, &dev_info);
+ if ((ports[res->port_id].tso_segsz != 0) &&
+ (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
+ printf("Warning: TSO enabled but not "
+ "supported by port %d\n", res->port_id);
+ }
+}
+
+cmdline_parse_token_string_t cmd_tso_set_tso =
+ TOKEN_STRING_INITIALIZER(struct cmd_tso_set_result,
+ tso, "tso");
+cmdline_parse_token_string_t cmd_tso_set_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_tso_set_result,
+ mode, "set");
+cmdline_parse_token_num_t cmd_tso_set_tso_segsz =
+ TOKEN_NUM_INITIALIZER(struct cmd_tso_set_result,
+ tso_segsz, UINT16);
+cmdline_parse_token_num_t cmd_tso_set_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_tso_set_result,
+ port_id, UINT8);
+
+/* Command registration: tso set <tso_segsz> <port> */
+cmdline_parse_inst_t cmd_tso_set = {
+ .f = cmd_tso_set_parsed,
+ .data = NULL,
+ .help_str = "Set TSO segment size for csum engine (0 to disable): "
+ "tso set <tso_segsz> <port>",
+ .tokens = {
+ (void *)&cmd_tso_set_tso,
+ (void *)&cmd_tso_set_mode,
+ (void *)&cmd_tso_set_tso_segsz,
+ (void *)&cmd_tso_set_portid,
+ NULL,
+ },
+};
+
+/* "show" variant reuses the mode field with a different fixed string. */
+cmdline_parse_token_string_t cmd_tso_show_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_tso_set_result,
+ mode, "show");
+
+
+/* Command registration: tso show <port> */
+cmdline_parse_inst_t cmd_tso_show = {
+ .f = cmd_tso_set_parsed,
+ .data = NULL,
+ .help_str = "Show TSO segment size for csum engine: "
+ "tso show <port>",
+ .tokens = {
+ (void *)&cmd_tso_set_tso,
+ (void *)&cmd_tso_show_mode,
+ (void *)&cmd_tso_set_portid,
+ NULL,
+ },
+};
+
+/* *** ENABLE/DISABLE FLUSH ON RX STREAMS *** */
+/* Parsed fields for: set flush_rx on|off */
+struct cmd_set_flush_rx {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t flush_rx;
+ cmdline_fixed_string_t mode;
+};
+
+/* Handler: "on" clears the global no_flush_rx flag, "off" sets it
+ * (the global is the inverse of the command's sense). */
+static void
+cmd_set_flush_rx_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_flush_rx *res = parsed_result;
+ no_flush_rx = (uint8_t)((strcmp(res->mode, "on") == 0) ? 0 : 1);
+}
+
+cmdline_parse_token_string_t cmd_setflushrx_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_flush_rx,
+ set, "set");
+cmdline_parse_token_string_t cmd_setflushrx_flush_rx =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_flush_rx,
+ flush_rx, "flush_rx");
+cmdline_parse_token_string_t cmd_setflushrx_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_flush_rx,
+ mode, "on#off");
+
+
+/* Command registration: set flush_rx on|off */
+cmdline_parse_inst_t cmd_set_flush_rx = {
+ .f = cmd_set_flush_rx_parsed,
+ .help_str = "set flush_rx on|off: enable/disable flush on rx streams",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setflushrx_set,
+ (void *)&cmd_setflushrx_flush_rx,
+ (void *)&cmd_setflushrx_mode,
+ NULL,
+ },
+};
+
+/* *** ENABLE/DISABLE LINK STATUS CHECK *** */
+/* Parsed fields for: set link_check on|off */
+struct cmd_set_link_check {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t link_check;
+ cmdline_fixed_string_t mode;
+};
+
+/* Handler: "on" clears the global no_link_check flag, "off" sets it
+ * (the global is the inverse of the command's sense). */
+static void
+cmd_set_link_check_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_link_check *res = parsed_result;
+ no_link_check = (uint8_t)((strcmp(res->mode, "on") == 0) ? 0 : 1);
+}
+
+cmdline_parse_token_string_t cmd_setlinkcheck_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_check,
+ set, "set");
+cmdline_parse_token_string_t cmd_setlinkcheck_link_check =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_check,
+ link_check, "link_check");
+cmdline_parse_token_string_t cmd_setlinkcheck_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_check,
+ mode, "on#off");
+
+
+/* Command registration: set link_check on|off */
+cmdline_parse_inst_t cmd_set_link_check = {
+ .f = cmd_set_link_check_parsed,
+ .help_str = "set link_check on|off: enable/disable link status check "
+ "when starting/stopping a port",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setlinkcheck_set,
+ (void *)&cmd_setlinkcheck_link_check,
+ (void *)&cmd_setlinkcheck_mode,
+ NULL,
+ },
+};
+
+#ifdef RTE_NIC_BYPASS
+/* *** SET NIC BYPASS MODE *** */
+/* Parsed fields for: set bypass mode normal|bypass|isolate <port_id> */
+struct cmd_set_bypass_mode_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bypass;
+ cmdline_fixed_string_t mode;
+ cmdline_fixed_string_t value;
+ uint8_t port_id;
+};
+
+/* Handler: map the mode keyword to an RTE_BYPASS_MODE_* value and apply
+ * it via rte_eth_dev_bypass_state_set(); unknown keywords fall back to
+ * NORMAL. Silently returns when the port has no bypass support. */
+static void
+cmd_set_bypass_mode_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bypass_mode_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL;
+
+ if (!bypass_is_supported(port_id))
+ return;
+
+ if (!strcmp(res->value, "bypass"))
+ bypass_mode = RTE_BYPASS_MODE_BYPASS;
+ else if (!strcmp(res->value, "isolate"))
+ bypass_mode = RTE_BYPASS_MODE_ISOLATE;
+ else
+ bypass_mode = RTE_BYPASS_MODE_NORMAL;
+
+ /* Set the bypass mode for the relevant port. */
+ if (0 != rte_eth_dev_bypass_state_set(port_id, &bypass_mode)) {
+ printf("\t Failed to set bypass mode for port = %d.\n", port_id);
+ }
+}
+
+cmdline_parse_token_string_t cmd_setbypass_mode_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbypass_mode_bypass =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result,
+ bypass, "bypass");
+cmdline_parse_token_string_t cmd_setbypass_mode_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result,
+ mode, "mode");
+cmdline_parse_token_string_t cmd_setbypass_mode_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result,
+ value, "normal#bypass#isolate");
+cmdline_parse_token_num_t cmd_setbypass_mode_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bypass_mode_result,
+ port_id, UINT8);
+
+/* Command registration: set bypass mode normal|bypass|isolate <port_id> */
+cmdline_parse_inst_t cmd_set_bypass_mode = {
+ .f = cmd_set_bypass_mode_parsed,
+ .help_str = "set bypass mode (normal|bypass|isolate) (port_id): "
+ "Set the NIC bypass mode for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setbypass_mode_set,
+ (void *)&cmd_setbypass_mode_bypass,
+ (void *)&cmd_setbypass_mode_mode,
+ (void *)&cmd_setbypass_mode_value,
+ (void *)&cmd_setbypass_mode_port,
+ NULL,
+ },
+};
+
+/* *** SET NIC BYPASS EVENT *** */
+/* Parsed fields for:
+ * set bypass event <event> mode normal|bypass|isolate <port_id> */
+struct cmd_set_bypass_event_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bypass;
+ cmdline_fixed_string_t event;
+ cmdline_fixed_string_t event_value;
+ cmdline_fixed_string_t mode;
+ cmdline_fixed_string_t mode_value;
+ uint8_t port_id;
+};
+
+/*
+ * Handler for "set bypass event ... mode ... <port_id>".
+ *
+ * Maps the event and mode keywords to their RTE_BYPASS_* values
+ * (unknown keywords fall back to NONE/NORMAL). For the "timeout"
+ * event the previously configured global bypass_timeout is validated
+ * and written to the watchdog first; a failure there is reported but
+ * does not prevent storing the event/mode pair itself.
+ */
+static void
+cmd_set_bypass_event_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int32_t rc;
+ struct cmd_set_bypass_event_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ uint32_t bypass_event = RTE_BYPASS_EVENT_NONE;
+ uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL;
+
+ if (!bypass_is_supported(port_id))
+ return;
+
+ if (!strcmp(res->event_value, "timeout"))
+ bypass_event = RTE_BYPASS_EVENT_TIMEOUT;
+ else if (!strcmp(res->event_value, "os_on"))
+ bypass_event = RTE_BYPASS_EVENT_OS_ON;
+ else if (!strcmp(res->event_value, "os_off"))
+ bypass_event = RTE_BYPASS_EVENT_OS_OFF;
+ else if (!strcmp(res->event_value, "power_on"))
+ bypass_event = RTE_BYPASS_EVENT_POWER_ON;
+ else if (!strcmp(res->event_value, "power_off"))
+ bypass_event = RTE_BYPASS_EVENT_POWER_OFF;
+ else
+ bypass_event = RTE_BYPASS_EVENT_NONE;
+
+ if (!strcmp(res->mode_value, "bypass"))
+ bypass_mode = RTE_BYPASS_MODE_BYPASS;
+ else if (!strcmp(res->mode_value, "isolate"))
+ bypass_mode = RTE_BYPASS_MODE_ISOLATE;
+ else
+ bypass_mode = RTE_BYPASS_MODE_NORMAL;
+
+ /* Set the watchdog timeout. */
+ if (bypass_event == RTE_BYPASS_EVENT_TIMEOUT) {
+
+ /* Reject an out-of-range timeout without touching the HW;
+ * otherwise rc carries the driver's store result. */
+ rc = -EINVAL;
+ if (RTE_BYPASS_TMT_VALID(bypass_timeout))
+ rc = rte_eth_dev_wd_timeout_store(port_id,
+ bypass_timeout);
+ if (rc != 0) {
+ printf("Failed to set timeout value %u "
+ "for port %d, error code: %d.\n",
+ bypass_timeout, port_id, rc);
+ }
+ }
+
+ /* Set the bypass event to transition to bypass mode. */
+ if (0 != rte_eth_dev_bypass_event_store(port_id,
+ bypass_event, bypass_mode)) {
+ printf("\t Failed to set bypass event for port = %d.\n", port_id);
+ }
+
+}
+
+/* Tokens for the "set bypass event" command. */
+cmdline_parse_token_string_t cmd_setbypass_event_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbypass_event_bypass =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result,
+ bypass, "bypass");
+cmdline_parse_token_string_t cmd_setbypass_event_event =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result,
+ event, "event");
+cmdline_parse_token_string_t cmd_setbypass_event_event_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result,
+ event_value, "none#timeout#os_off#os_on#power_on#power_off");
+cmdline_parse_token_string_t cmd_setbypass_event_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result,
+ mode, "mode");
+cmdline_parse_token_string_t cmd_setbypass_event_mode_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result,
+ mode_value, "normal#bypass#isolate");
+cmdline_parse_token_num_t cmd_setbypass_event_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bypass_event_result,
+ port_id, UINT8);
+
+/* Command registration:
+ * set bypass event <event> mode normal|bypass|isolate <port_id> */
+cmdline_parse_inst_t cmd_set_bypass_event = {
+ .f = cmd_set_bypass_event_parsed,
+ .help_str = "set bypass event (timeout|os_on|os_off|power_on|power_off) "
+ "mode (normal|bypass|isolate) (port_id): "
+ "Set the NIC bypass event mode for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setbypass_event_set,
+ (void *)&cmd_setbypass_event_bypass,
+ (void *)&cmd_setbypass_event_event,
+ (void *)&cmd_setbypass_event_event_value,
+ (void *)&cmd_setbypass_event_mode,
+ (void *)&cmd_setbypass_event_mode_value,
+ (void *)&cmd_setbypass_event_port,
+ NULL,
+ },
+};
+
+
+/* *** SET NIC BYPASS TIMEOUT *** */
+/* Parsed fields for: set bypass timeout <seconds> */
+struct cmd_set_bypass_timeout_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bypass;
+ cmdline_fixed_string_t timeout;
+ cmdline_fixed_string_t value;
+};
+
+/* Handler: translate the textual seconds value into the corresponding
+ * RTE_BYPASS_TMT_* constant and store it in the global bypass_timeout;
+ * any unrecognized value (including "0") means "off". The value takes
+ * effect when a later "set bypass event timeout ..." command runs. */
+static void
+cmd_set_bypass_timeout_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bypass_timeout_result *res = parsed_result;
+
+ if (!strcmp(res->value, "1.5"))
+ bypass_timeout = RTE_BYPASS_TMT_1_5_SEC;
+ else if (!strcmp(res->value, "2"))
+ bypass_timeout = RTE_BYPASS_TMT_2_SEC;
+ else if (!strcmp(res->value, "3"))
+ bypass_timeout = RTE_BYPASS_TMT_3_SEC;
+ else if (!strcmp(res->value, "4"))
+ bypass_timeout = RTE_BYPASS_TMT_4_SEC;
+ else if (!strcmp(res->value, "8"))
+ bypass_timeout = RTE_BYPASS_TMT_8_SEC;
+ else if (!strcmp(res->value, "16"))
+ bypass_timeout = RTE_BYPASS_TMT_16_SEC;
+ else if (!strcmp(res->value, "32"))
+ bypass_timeout = RTE_BYPASS_TMT_32_SEC;
+ else
+ bypass_timeout = RTE_BYPASS_TMT_OFF;
+}
+
+cmdline_parse_token_string_t cmd_setbypass_timeout_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbypass_timeout_bypass =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result,
+ bypass, "bypass");
+cmdline_parse_token_string_t cmd_setbypass_timeout_timeout =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result,
+ timeout, "timeout");
+cmdline_parse_token_string_t cmd_setbypass_timeout_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result,
+ value, "0#1.5#2#3#4#8#16#32");
+
+/* Command registration: set bypass timeout 0|1.5|2|3|4|8|16|32 */
+cmdline_parse_inst_t cmd_set_bypass_timeout = {
+ .f = cmd_set_bypass_timeout_parsed,
+ .help_str = "set bypass timeout (0|1.5|2|3|4|8|16|32) seconds: "
+ "Set the NIC bypass watchdog timeout",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setbypass_timeout_set,
+ (void *)&cmd_setbypass_timeout_bypass,
+ (void *)&cmd_setbypass_timeout_timeout,
+ (void *)&cmd_setbypass_timeout_value,
+ NULL,
+ },
+};
+
+/* *** SHOW NIC BYPASS MODE *** */
+/* Parsed fields for: show bypass config <port_id> */
+struct cmd_show_bypass_config_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t bypass;
+ cmdline_fixed_string_t config;
+ uint8_t port_id;
+};
+
+/* Handler: print the port's current bypass mode, the configured
+ * watchdog timeout, and the mode associated with each bypass event.
+ * Out-of-range values read back from the driver are reported as
+ * "UNKNOWN"/off rather than indexing past the lookup tables. */
+static void
+cmd_show_bypass_config_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_bypass_config_result *res = parsed_result;
+ uint32_t event_mode;
+ uint32_t bypass_mode;
+ portid_t port_id = res->port_id;
+ uint32_t timeout = bypass_timeout;
+ int i;
+
+ static const char * const timeouts[RTE_BYPASS_TMT_NUM] =
+ {"off", "1.5", "2", "3", "4", "8", "16", "32"};
+ static const char * const modes[RTE_BYPASS_MODE_NUM] =
+ {"UNKNOWN", "normal", "bypass", "isolate"};
+ static const char * const events[RTE_BYPASS_EVENT_NUM] = {
+ "NONE",
+ "OS/board on",
+ "power supply on",
+ "OS/board off",
+ "power supply off",
+ "timeout"};
+ /* Derived from the array itself, so it equals RTE_BYPASS_EVENT_NUM. */
+ int num_events = (sizeof events) / (sizeof events[0]);
+
+ if (!bypass_is_supported(port_id))
+ return;
+
+ /* Display the bypass mode.*/
+ if (0 != rte_eth_dev_bypass_state_show(port_id, &bypass_mode)) {
+ printf("\tFailed to get bypass mode for port = %d\n", port_id);
+ return;
+ }
+ else {
+ if (!RTE_BYPASS_MODE_VALID(bypass_mode))
+ bypass_mode = RTE_BYPASS_MODE_NONE;
+
+ printf("\tbypass mode = %s\n", modes[bypass_mode]);
+ }
+
+ /* Display the bypass timeout.*/
+ if (!RTE_BYPASS_TMT_VALID(timeout))
+ timeout = RTE_BYPASS_TMT_OFF;
+
+ printf("\tbypass timeout = %s\n", timeouts[timeout]);
+
+ /* Display the bypass events and associated modes. */
+ for (i = RTE_BYPASS_EVENT_START; i < num_events; i++) {
+
+ if (0 != rte_eth_dev_bypass_event_show(port_id, i, &event_mode)) {
+ printf("\tFailed to get bypass mode for event = %s\n",
+ events[i]);
+ } else {
+ if (!RTE_BYPASS_MODE_VALID(event_mode))
+ event_mode = RTE_BYPASS_MODE_NONE;
+
+ printf("\tbypass event: %-16s = %s\n", events[i],
+ modes[event_mode]);
+ }
+ }
+}
+
+cmdline_parse_token_string_t cmd_showbypass_config_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_bypass_config_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_showbypass_config_bypass =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_bypass_config_result,
+ bypass, "bypass");
+cmdline_parse_token_string_t cmd_showbypass_config_config =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_bypass_config_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_showbypass_config_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_bypass_config_result,
+ port_id, UINT8);
+
+/* Command registration: show bypass config <port_id> */
+cmdline_parse_inst_t cmd_show_bypass_config = {
+ .f = cmd_show_bypass_config_parsed,
+ .help_str = "show bypass config (port_id): "
+ "Show the NIC bypass config for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_showbypass_config_show,
+ (void *)&cmd_showbypass_config_bypass,
+ (void *)&cmd_showbypass_config_config,
+ (void *)&cmd_showbypass_config_port,
+ NULL,
+ },
+};
+#endif
+
+#ifdef RTE_LIBRTE_PMD_BOND
+/* *** SET BONDING MODE *** */
+/* Parsed fields for: set bonding mode <mode_value> <port_id> */
+struct cmd_set_bonding_mode_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t mode;
+ uint8_t value;
+ uint8_t port_id;
+};
+
+/* Handler: apply the numeric bonding mode via rte_eth_bond_mode_set(). */
+static void cmd_set_bonding_mode_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bonding_mode_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+
+ /* Set the bonding mode for the relevant port. */
+ if (0 != rte_eth_bond_mode_set(port_id, res->value))
+ printf("\t Failed to set bonding mode for port = %d.\n", port_id);
+}
+
+cmdline_parse_token_string_t cmd_setbonding_mode_set =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_mode_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbonding_mode_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_mode_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_setbonding_mode_mode =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_mode_result,
+ mode, "mode");
+cmdline_parse_token_num_t cmd_setbonding_mode_value =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_mode_result,
+ value, UINT8);
+cmdline_parse_token_num_t cmd_setbonding_mode_port =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_mode_result,
+ port_id, UINT8);
+
+/* Command registration: set bonding mode <mode_value> <port_id> */
+cmdline_parse_inst_t cmd_set_bonding_mode = {
+ .f = cmd_set_bonding_mode_parsed,
+ .help_str = "set bonding mode (mode_value) (port_id): Set the bonding mode for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *) &cmd_setbonding_mode_set,
+ (void *) &cmd_setbonding_mode_bonding,
+ (void *) &cmd_setbonding_mode_mode,
+ (void *) &cmd_setbonding_mode_value,
+ (void *) &cmd_setbonding_mode_port,
+ NULL
+ }
+};
+
+/* *** SET BALANCE XMIT POLICY *** */
+/* Parsed fields for: set bonding balance_xmit_policy <port_id> l2|l23|l34 */
+struct cmd_set_bonding_balance_xmit_policy_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t balance_xmit_policy;
+ uint8_t port_id;
+ cmdline_fixed_string_t policy;
+};
+
+/* Handler: map the policy keyword (l2/l23/l34) to its BALANCE_XMIT_POLICY_*
+ * constant and apply it via rte_eth_bond_xmit_policy_set(); anything else
+ * is rejected before touching the device. */
+static void cmd_set_bonding_balance_xmit_policy_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bonding_balance_xmit_policy_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ uint8_t policy;
+
+ if (!strcmp(res->policy, "l2")) {
+ policy = BALANCE_XMIT_POLICY_LAYER2;
+ } else if (!strcmp(res->policy, "l23")) {
+ policy = BALANCE_XMIT_POLICY_LAYER23;
+ } else if (!strcmp(res->policy, "l34")) {
+ policy = BALANCE_XMIT_POLICY_LAYER34;
+ } else {
+ printf("\t Invalid xmit policy selection");
+ return;
+ }
+
+ /* Set the bonding mode for the relevant port. */
+ if (0 != rte_eth_bond_xmit_policy_set(port_id, policy)) {
+ printf("\t Failed to set bonding balance xmit policy for port = %d.\n",
+ port_id);
+ }
+}
+
+cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_set =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_balance_xmit_policy =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
+ balance_xmit_policy, "balance_xmit_policy");
+cmdline_parse_token_num_t cmd_setbonding_balance_xmit_policy_port =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_policy =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
+ policy, "l2#l23#l34");
+
+/* Command registration: set bonding balance_xmit_policy <port_id> <policy> */
+cmdline_parse_inst_t cmd_set_balance_xmit_policy = {
+ .f = cmd_set_bonding_balance_xmit_policy_parsed,
+ .help_str = "set bonding balance_xmit_policy (port_id) (policy_value): Set the bonding balance_xmit_policy for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setbonding_balance_xmit_policy_set,
+ (void *)&cmd_setbonding_balance_xmit_policy_bonding,
+ (void *)&cmd_setbonding_balance_xmit_policy_balance_xmit_policy,
+ (void *)&cmd_setbonding_balance_xmit_policy_port,
+ (void *)&cmd_setbonding_balance_xmit_policy_policy,
+ NULL
+ }
+};
+
+/* *** SHOW NIC BONDING CONFIGURATION *** */
+/* Parsed fields for: show bonding config <port_id> */
+struct cmd_show_bonding_config_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t config;
+ uint8_t port_id;
+};
+
+/* Handler: query and print a bonded port's configuration — bonding mode,
+ * balance xmit policy (BALANCE mode only), slave list, active-slave list
+ * and primary slave. Each query failure aborts the remaining output. */
+static void cmd_show_bonding_config_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_bonding_config_result *res = parsed_result;
+ int bonding_mode;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ int num_slaves, num_active_slaves;
+ int primary_id;
+ int i;
+ portid_t port_id = res->port_id;
+
+ /* Display the bonding mode.*/
+ bonding_mode = rte_eth_bond_mode_get(port_id);
+ if (bonding_mode < 0) {
+ printf("\tFailed to get bonding mode for port = %d\n", port_id);
+ return;
+ } else
+ printf("\tBonding mode: %d\n", bonding_mode);
+
+ if (bonding_mode == BONDING_MODE_BALANCE) {
+ int balance_xmit_policy;
+
+ balance_xmit_policy = rte_eth_bond_xmit_policy_get(port_id);
+ if (balance_xmit_policy < 0) {
+ printf("\tFailed to get balance xmit policy for port = %d\n",
+ port_id);
+ return;
+ } else {
+ printf("\tBalance Xmit Policy: ");
+
+ switch (balance_xmit_policy) {
+ case BALANCE_XMIT_POLICY_LAYER2:
+ printf("BALANCE_XMIT_POLICY_LAYER2");
+ break;
+ case BALANCE_XMIT_POLICY_LAYER23:
+ printf("BALANCE_XMIT_POLICY_LAYER23");
+ break;
+ case BALANCE_XMIT_POLICY_LAYER34:
+ printf("BALANCE_XMIT_POLICY_LAYER34");
+ break;
+ }
+ printf("\n");
+ }
+ }
+
+ num_slaves = rte_eth_bond_slaves_get(port_id, slaves, RTE_MAX_ETHPORTS);
+
+ if (num_slaves < 0) {
+ printf("\tFailed to get slave list for port = %d\n", port_id);
+ return;
+ }
+ /* Print slaves as "[id id ... id]"; the last entry is printed
+ * separately to avoid a trailing separator. */
+ if (num_slaves > 0) {
+ printf("\tSlaves (%d): [", num_slaves);
+ for (i = 0; i < num_slaves - 1; i++)
+ printf("%d ", slaves[i]);
+
+ printf("%d]\n", slaves[num_slaves - 1]);
+ } else {
+ printf("\tSlaves: []\n");
+
+ }
+
+ num_active_slaves = rte_eth_bond_active_slaves_get(port_id, slaves,
+ RTE_MAX_ETHPORTS);
+
+ if (num_active_slaves < 0) {
+ printf("\tFailed to get active slave list for port = %d\n", port_id);
+ return;
+ }
+ if (num_active_slaves > 0) {
+ printf("\tActive Slaves (%d): [", num_active_slaves);
+ for (i = 0; i < num_active_slaves - 1; i++)
+ printf("%d ", slaves[i]);
+
+ printf("%d]\n", slaves[num_active_slaves - 1]);
+
+ } else {
+ printf("\tActive Slaves: []\n");
+
+ }
+
+ primary_id = rte_eth_bond_primary_get(port_id);
+ if (primary_id < 0) {
+ printf("\tFailed to get primary slave for port = %d\n", port_id);
+ return;
+ } else
+ printf("\tPrimary: [%d]\n", primary_id);
+
+}
+
+cmdline_parse_token_string_t cmd_showbonding_config_show =
+TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_config_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_showbonding_config_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_config_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_showbonding_config_config =
+TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_config_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_showbonding_config_port =
+TOKEN_NUM_INITIALIZER(struct cmd_show_bonding_config_result,
+ port_id, UINT8);
+
+/* Command registration: show bonding config <port_id> */
+cmdline_parse_inst_t cmd_show_bonding_config = {
+ .f = cmd_show_bonding_config_parsed,
+ .help_str = "show bonding config (port_id): Show the bonding config for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_showbonding_config_show,
+ (void *)&cmd_showbonding_config_bonding,
+ (void *)&cmd_showbonding_config_config,
+ (void *)&cmd_showbonding_config_port,
+ NULL
+ }
+};
+
+/* *** SET BONDING PRIMARY *** */
+/* Token layout parsed from "set bonding primary (slave_id) (port_id)". */
+struct cmd_set_bonding_primary_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t primary;
+ uint8_t slave_id;
+ uint8_t port_id;
+};
+
+/* Select which slave acts as primary for the bonded device, then refresh
+ * the testpmd port configuration. */
+static void cmd_set_bonding_primary_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bonding_primary_result *res = parsed_result;
+ portid_t master_port_id = res->port_id;
+ portid_t slave_port_id = res->slave_id;
+
+ /* Set the primary slave for a bonded device. */
+ if (0 != rte_eth_bond_primary_set(master_port_id, slave_port_id)) {
+ printf("\t Failed to set primary slave for port = %d.\n",
+ master_port_id);
+ return;
+ }
+ init_port_config();
+}
+
+cmdline_parse_token_string_t cmd_setbonding_primary_set =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbonding_primary_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_setbonding_primary_primary =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result,
+ primary, "primary");
+cmdline_parse_token_num_t cmd_setbonding_primary_slave =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result,
+ slave_id, UINT8);
+cmdline_parse_token_num_t cmd_setbonding_primary_port =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_set_bonding_primary = {
+ .f = cmd_set_bonding_primary_parsed,
+ .help_str = "set bonding primary (slave_id) (port_id): Set the primary slave for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setbonding_primary_set,
+ (void *)&cmd_setbonding_primary_bonding,
+ (void *)&cmd_setbonding_primary_primary,
+ (void *)&cmd_setbonding_primary_slave,
+ (void *)&cmd_setbonding_primary_port,
+ NULL
+ }
+};
+
+/* *** ADD SLAVE *** */
+/* Token layout parsed from "add bonding slave (slave_id) (port_id)". */
+struct cmd_add_bonding_slave_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t slave;
+ uint8_t slave_id;
+ uint8_t port_id;
+};
+
+/* Attach a slave port to a bonded device, refresh the port config and
+ * mark the slave port so the rest of testpmd treats it as bonded. */
+static void cmd_add_bonding_slave_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_bonding_slave_result *res = parsed_result;
+ portid_t master_port_id = res->port_id;
+ portid_t slave_port_id = res->slave_id;
+
+ /* Add the slave device to the bonded device. */
+ if (0 != rte_eth_bond_slave_add(master_port_id, slave_port_id)) {
+ printf("\t Failed to add slave %d to master port = %d.\n",
+ slave_port_id, master_port_id);
+ return;
+ }
+ init_port_config();
+ set_port_slave_flag(slave_port_id);
+}
+
+cmdline_parse_token_string_t cmd_addbonding_slave_add =
+TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result,
+ add, "add");
+cmdline_parse_token_string_t cmd_addbonding_slave_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_addbonding_slave_slave =
+TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result,
+ slave, "slave");
+cmdline_parse_token_num_t cmd_addbonding_slave_slaveid =
+TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result,
+ slave_id, UINT8);
+cmdline_parse_token_num_t cmd_addbonding_slave_port =
+TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_add_bonding_slave = {
+ .f = cmd_add_bonding_slave_parsed,
+ .help_str = "add bonding slave (slave_id) (port_id): Add a slave device to a bonded device",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_addbonding_slave_add,
+ (void *)&cmd_addbonding_slave_bonding,
+ (void *)&cmd_addbonding_slave_slave,
+ (void *)&cmd_addbonding_slave_slaveid,
+ (void *)&cmd_addbonding_slave_port,
+ NULL
+ }
+};
+
+/* *** REMOVE SLAVE *** */
+/* Token layout parsed from "remove bonding slave (slave_id) (port_id)". */
+struct cmd_remove_bonding_slave_result {
+ cmdline_fixed_string_t remove;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t slave;
+ uint8_t slave_id;
+ uint8_t port_id;
+};
+
+/* Detach a slave port from a bonded device, refresh the port config and
+ * clear the slave flag so the port is treated as standalone again. */
+static void cmd_remove_bonding_slave_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_remove_bonding_slave_result *res = parsed_result;
+ portid_t master_port_id = res->port_id;
+ portid_t slave_port_id = res->slave_id;
+
+ /* Remove the slave device from the bonded device. */
+ if (0 != rte_eth_bond_slave_remove(master_port_id, slave_port_id)) {
+ printf("\t Failed to remove slave %d from master port = %d.\n",
+ slave_port_id, master_port_id);
+ return;
+ }
+ init_port_config();
+ clear_port_slave_flag(slave_port_id);
+}
+
+cmdline_parse_token_string_t cmd_removebonding_slave_remove =
+ TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result,
+ remove, "remove");
+cmdline_parse_token_string_t cmd_removebonding_slave_bonding =
+ TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_removebonding_slave_slave =
+ TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result,
+ slave, "slave");
+cmdline_parse_token_num_t cmd_removebonding_slave_slaveid =
+ TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result,
+ slave_id, UINT8);
+cmdline_parse_token_num_t cmd_removebonding_slave_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_remove_bonding_slave = {
+ .f = cmd_remove_bonding_slave_parsed,
+ .help_str = "remove bonding slave (slave_id) (port_id): Remove a slave device from a bonded device",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_removebonding_slave_remove,
+ (void *)&cmd_removebonding_slave_bonding,
+ (void *)&cmd_removebonding_slave_slave,
+ (void *)&cmd_removebonding_slave_slaveid,
+ (void *)&cmd_removebonding_slave_port,
+ NULL
+ }
+};
+
+/* *** CREATE BONDED DEVICE *** */
+/* Token layout parsed from "create bonded device (mode) (socket)". */
+struct cmd_create_bonded_device_result {
+ cmdline_fixed_string_t create;
+ cmdline_fixed_string_t bonded;
+ cmdline_fixed_string_t device;
+ uint8_t mode;
+ uint8_t socket;
+};
+
+/* Monotonic counter used to build unique bonded-device names. */
+static int bond_dev_num = 0;
+
+/*
+ * Create a new bonded ethdev named "eth_bond_testpmd_<n>" with the given
+ * bonding mode on the given NUMA socket. Refuses to run while forwarding
+ * is active (test_done == 0). On success the new port is reconfigured,
+ * put in promiscuous mode and enabled in the testpmd port table.
+ */
+static void cmd_create_bonded_device_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_create_bonded_device_result *res = parsed_result;
+ char ethdev_name[RTE_ETH_NAME_MAX_LEN];
+ int port_id;
+
+ if (test_done == 0) {
+ printf("Please stop forwarding first\n");
+ return;
+ }
+
+ snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "eth_bond_testpmd_%d",
+ bond_dev_num++);
+
+ /* Create a new bonded device. */
+ port_id = rte_eth_bond_create(ethdev_name, res->mode, res->socket);
+ if (port_id < 0) {
+ printf("\t Failed to create bonded device.\n");
+ return;
+ } else {
+ printf("Created new bonded device %s on (port %d).\n", ethdev_name,
+ port_id);
+
+ /* Update number of ports */
+ nb_ports = rte_eth_dev_count();
+ reconfig(port_id, res->socket);
+ rte_eth_promiscuous_enable(port_id);
+ ports[port_id].enabled = 1;
+ }
+
+}
+
+cmdline_parse_token_string_t cmd_createbonded_device_create =
+ TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result,
+ create, "create");
+cmdline_parse_token_string_t cmd_createbonded_device_bonded =
+ TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result,
+ bonded, "bonded");
+cmdline_parse_token_string_t cmd_createbonded_device_device =
+ TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result,
+ device, "device");
+cmdline_parse_token_num_t cmd_createbonded_device_mode =
+ TOKEN_NUM_INITIALIZER(struct cmd_create_bonded_device_result,
+ mode, UINT8);
+cmdline_parse_token_num_t cmd_createbonded_device_socket =
+ TOKEN_NUM_INITIALIZER(struct cmd_create_bonded_device_result,
+ socket, UINT8);
+
+cmdline_parse_inst_t cmd_create_bonded_device = {
+ .f = cmd_create_bonded_device_parsed,
+ .help_str = "create bonded device (mode) (socket): Create a new bonded device with specific bonding mode and socket",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_createbonded_device_create,
+ (void *)&cmd_createbonded_device_bonded,
+ (void *)&cmd_createbonded_device_device,
+ (void *)&cmd_createbonded_device_mode,
+ (void *)&cmd_createbonded_device_socket,
+ NULL
+ }
+};
+
+/* *** SET MAC ADDRESS IN BONDED DEVICE *** */
+/* Token layout parsed from "set bonding mac_addr (port_id) (address)". */
+struct cmd_set_bond_mac_addr_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t mac_addr;
+ uint8_t port_num;
+ struct ether_addr address;
+};
+
+/* Set the MAC address of a bonded device after validating the port id. */
+static void cmd_set_bond_mac_addr_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bond_mac_addr_result *res = parsed_result;
+ int ret;
+
+ if (port_id_is_invalid(res->port_num, ENABLED_WARN))
+ return;
+
+ ret = rte_eth_bond_mac_address_set(res->port_num, &res->address);
+
+ /* check the return value and print it if is < 0 */
+ if (ret < 0)
+ printf("set_bond_mac_addr error: (%s)\n", strerror(-ret));
+}
+
+cmdline_parse_token_string_t cmd_set_bond_mac_addr_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mac_addr_result, set, "set");
+cmdline_parse_token_string_t cmd_set_bond_mac_addr_bonding =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mac_addr_result, bonding,
+ "bonding");
+cmdline_parse_token_string_t cmd_set_bond_mac_addr_mac =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mac_addr_result, mac_addr,
+ "mac_addr");
+cmdline_parse_token_num_t cmd_set_bond_mac_addr_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mac_addr_result, port_num, UINT8);
+cmdline_parse_token_etheraddr_t cmd_set_bond_mac_addr_addr =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_bond_mac_addr_result, address);
+
+cmdline_parse_inst_t cmd_set_bond_mac_addr = {
+ .f = cmd_set_bond_mac_addr_parsed,
+ .data = (void *) 0,
+ .help_str = "set bonding mac_addr (port_id) (address): ",
+ .tokens = {
+ (void *)&cmd_set_bond_mac_addr_set,
+ (void *)&cmd_set_bond_mac_addr_bonding,
+ (void *)&cmd_set_bond_mac_addr_mac,
+ (void *)&cmd_set_bond_mac_addr_portnum,
+ (void *)&cmd_set_bond_mac_addr_addr,
+ NULL
+ }
+};
+
+
+/* *** SET LINK STATUS MONITORING POLLING PERIOD ON BONDED DEVICE *** */
+/* Token layout parsed from "set bonding mon_period (port_id) (period_ms)". */
+struct cmd_set_bond_mon_period_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t mon_period;
+ uint8_t port_num;
+ uint32_t period_ms;
+};
+
+/*
+ * Set the link-status monitoring polling period (milliseconds) of a
+ * bonded device via rte_eth_bond_link_monitoring_set(), after a basic
+ * range check of the port number against nb_ports.
+ */
+static void cmd_set_bond_mon_period_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bond_mon_period_result *res = parsed_result;
+ int ret;
+
+ if (res->port_num >= nb_ports) {
+ printf("Port id %d must be less than %d\n", res->port_num, nb_ports);
+ return;
+ }
+
+ ret = rte_eth_bond_link_monitoring_set(res->port_num, res->period_ms);
+
+ /* check the return value and print it if is < 0 */
+ if (ret < 0)
+ printf("set_bond_mon_period error: (%s)\n", strerror(-ret));
+}
+
+cmdline_parse_token_string_t cmd_set_bond_mon_period_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mon_period_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_set_bond_mon_period_bonding =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mon_period_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_set_bond_mon_period_mon_period =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mon_period_result,
+ mon_period, "mon_period");
+cmdline_parse_token_num_t cmd_set_bond_mon_period_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mon_period_result,
+ port_num, UINT8);
+cmdline_parse_token_num_t cmd_set_bond_mon_period_period_ms =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mon_period_result,
+ period_ms, UINT32);
+
+cmdline_parse_inst_t cmd_set_bond_mon_period = {
+ .f = cmd_set_bond_mon_period_parsed,
+ .data = (void *) 0,
+ .help_str = "set bonding mon_period (port_id) (period_ms): ",
+ .tokens = {
+ (void *)&cmd_set_bond_mon_period_set,
+ (void *)&cmd_set_bond_mon_period_bonding,
+ (void *)&cmd_set_bond_mon_period_mon_period,
+ (void *)&cmd_set_bond_mon_period_portnum,
+ (void *)&cmd_set_bond_mon_period_period_ms,
+ NULL
+ }
+};
+
+#endif /* RTE_LIBRTE_PMD_BOND */
+
+/* *** SET FORWARDING MODE *** */
+/* Token layout parsed from "set fwd <mode>". */
+struct cmd_set_fwd_mode_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t fwd;
+ cmdline_fixed_string_t mode;
+};
+
+/* Switch testpmd's packet forwarding mode to the parsed mode name. */
+static void cmd_set_fwd_mode_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_fwd_mode_result *res = parsed_result;
+
+ set_pkt_forwarding_mode(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_setfwd_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, set, "set");
+cmdline_parse_token_string_t cmd_setfwd_fwd =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, fwd, "fwd");
+/* The accepted mode names are filled in at runtime by
+ * cmd_set_fwd_mode_init(), hence the empty choice string here. */
+cmdline_parse_token_string_t cmd_setfwd_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, mode,
+ "" /* defined at init */);
+
+cmdline_parse_inst_t cmd_set_fwd_mode = {
+ .f = cmd_set_fwd_mode_parsed,
+ .data = NULL,
+ .help_str = NULL, /* defined at init */
+ .tokens = {
+ (void *)&cmd_setfwd_set,
+ (void *)&cmd_setfwd_fwd,
+ (void *)&cmd_setfwd_mode,
+ NULL,
+ },
+};
+
+/*
+ * Build, at init time, the help string and the '#'-separated choice
+ * string for the "set fwd <mode>" command from the runtime list of
+ * forwarding modes: '|' separators from list_pkt_forwarding_modes()
+ * are rewritten as '#', the cmdline library's string-choice separator.
+ * NOTE(review): "token" is never explicitly NUL-terminated and the copy
+ * loop has no bound check; this relies on the static buffer being
+ * zero-initialized and the modes list being shorter than 128 bytes —
+ * confirm both hold.
+ */
+static void cmd_set_fwd_mode_init(void)
+{
+ char *modes, *c;
+ static char token[128];
+ static char help[256];
+ cmdline_parse_token_string_t *token_struct;
+
+ modes = list_pkt_forwarding_modes();
+ snprintf(help, sizeof help, "set fwd %s - "
+ "set packet forwarding mode", modes);
+ cmd_set_fwd_mode.help_str = help;
+
+ /* string token separator is # */
+ for (c = token; *modes != '\0'; modes++)
+ if (*modes == '|')
+ *c++ = '#';
+ else
+ *c++ = *modes;
+ /* Patch the runtime-built choice string into the mode token. */
+ token_struct = (cmdline_parse_token_string_t*)cmd_set_fwd_mode.tokens[2];
+ token_struct->string_data.str = token;
+}
+
+/* *** SET BURST TX DELAY TIME RETRY NUMBER *** */
+/* Token layout parsed from
+ * "set burst tx delay (time) retry (retry_num)". */
+struct cmd_set_burst_tx_retry_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t burst;
+ cmdline_fixed_string_t tx;
+ cmdline_fixed_string_t delay;
+ uint32_t time;
+ cmdline_fixed_string_t retry;
+ uint32_t retry_num;
+};
+
+/* Update the global burst TX delay time (microseconds) and retry count
+ * used when a TX burst cannot send all packets. */
+static void cmd_set_burst_tx_retry_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_burst_tx_retry_result *res = parsed_result;
+
+ if (!strcmp(res->set, "set") && !strcmp(res->burst, "burst")
+ && !strcmp(res->tx, "tx")) {
+ if (!strcmp(res->delay, "delay"))
+ burst_tx_delay_time = res->time;
+ if (!strcmp(res->retry, "retry"))
+ burst_tx_retry_num = res->retry_num;
+ }
+
+}
+
+cmdline_parse_token_string_t cmd_set_burst_tx_retry_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, set, "set");
+cmdline_parse_token_string_t cmd_set_burst_tx_retry_burst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, burst,
+ "burst");
+cmdline_parse_token_string_t cmd_set_burst_tx_retry_tx =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, tx, "tx");
+cmdline_parse_token_string_t cmd_set_burst_tx_retry_delay =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, delay, "delay");
+cmdline_parse_token_num_t cmd_set_burst_tx_retry_time =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_burst_tx_retry_result, time, UINT32);
+cmdline_parse_token_string_t cmd_set_burst_tx_retry_retry =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, retry, "retry");
+cmdline_parse_token_num_t cmd_set_burst_tx_retry_retry_num =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_burst_tx_retry_result, retry_num, UINT32);
+
+cmdline_parse_inst_t cmd_set_burst_tx_retry = {
+ .f = cmd_set_burst_tx_retry_parsed,
+ .help_str = "set burst tx delay (time_by_useconds) retry (retry_num)",
+ .tokens = {
+ (void *)&cmd_set_burst_tx_retry_set,
+ (void *)&cmd_set_burst_tx_retry_burst,
+ (void *)&cmd_set_burst_tx_retry_tx,
+ (void *)&cmd_set_burst_tx_retry_delay,
+ (void *)&cmd_set_burst_tx_retry_time,
+ (void *)&cmd_set_burst_tx_retry_retry,
+ (void *)&cmd_set_burst_tx_retry_retry_num,
+ NULL,
+ },
+};
+
+/* *** SET PROMISC MODE *** */
+/* Token layout shared by "set promisc all on|off" and
+ * "set promisc <port> on|off". */
+struct cmd_set_promisc_mode_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t promisc;
+ cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */
+ uint8_t port_num; /* valid if "allports" argument == 0 */
+ cmdline_fixed_string_t mode;
+};
+
+/* Enable/disable promiscuous mode; the inst's .data pointer doubles as a
+ * boolean: non-NULL means apply to every enabled port, NULL means apply
+ * only to res->port_num. */
+static void cmd_set_promisc_mode_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *allports)
+{
+ struct cmd_set_promisc_mode_result *res = parsed_result;
+ int enable;
+ portid_t i;
+
+ if (!strcmp(res->mode, "on"))
+ enable = 1;
+ else
+ enable = 0;
+
+ /* all ports */
+ if (allports) {
+ FOREACH_PORT(i, ports) {
+ if (enable)
+ rte_eth_promiscuous_enable(i);
+ else
+ rte_eth_promiscuous_disable(i);
+ }
+ }
+ else {
+ if (enable)
+ rte_eth_promiscuous_enable(res->port_num);
+ else
+ rte_eth_promiscuous_disable(res->port_num);
+ }
+}
+
+cmdline_parse_token_string_t cmd_setpromisc_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, set, "set");
+cmdline_parse_token_string_t cmd_setpromisc_promisc =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, promisc,
+ "promisc");
+cmdline_parse_token_string_t cmd_setpromisc_portall =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, port_all,
+ "all");
+cmdline_parse_token_num_t cmd_setpromisc_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_promisc_mode_result, port_num,
+ UINT8);
+cmdline_parse_token_string_t cmd_setpromisc_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, mode,
+ "on#off");
+
+/* "all ports" variant: .data = 1 selects the all-ports branch above. */
+cmdline_parse_inst_t cmd_set_promisc_mode_all = {
+ .f = cmd_set_promisc_mode_parsed,
+ .data = (void *)1,
+ .help_str = "set promisc all on|off: set promisc mode for all ports",
+ .tokens = {
+ (void *)&cmd_setpromisc_set,
+ (void *)&cmd_setpromisc_promisc,
+ (void *)&cmd_setpromisc_portall,
+ (void *)&cmd_setpromisc_mode,
+ NULL,
+ },
+};
+
+/* single-port variant: .data = 0 selects the per-port branch above. */
+cmdline_parse_inst_t cmd_set_promisc_mode_one = {
+ .f = cmd_set_promisc_mode_parsed,
+ .data = (void *)0,
+ .help_str = "set promisc X on|off: set promisc mode on port X",
+ .tokens = {
+ (void *)&cmd_setpromisc_set,
+ (void *)&cmd_setpromisc_promisc,
+ (void *)&cmd_setpromisc_portnum,
+ (void *)&cmd_setpromisc_mode,
+ NULL,
+ },
+};
+
+/* *** SET ALLMULTI MODE *** */
+/* Token layout shared by "set allmulti all on|off" and
+ * "set allmulti <port> on|off". */
+struct cmd_set_allmulti_mode_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t allmulti;
+ cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */
+ uint8_t port_num; /* valid if "allports" argument == 0 */
+ cmdline_fixed_string_t mode;
+};
+
+/* Enable/disable all-multicast mode; the inst's .data pointer doubles as
+ * a boolean: non-NULL means all enabled ports, NULL means only
+ * res->port_num. Mirrors cmd_set_promisc_mode_parsed. */
+static void cmd_set_allmulti_mode_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *allports)
+{
+ struct cmd_set_allmulti_mode_result *res = parsed_result;
+ int enable;
+ portid_t i;
+
+ if (!strcmp(res->mode, "on"))
+ enable = 1;
+ else
+ enable = 0;
+
+ /* all ports */
+ if (allports) {
+ FOREACH_PORT(i, ports) {
+ if (enable)
+ rte_eth_allmulticast_enable(i);
+ else
+ rte_eth_allmulticast_disable(i);
+ }
+ }
+ else {
+ if (enable)
+ rte_eth_allmulticast_enable(res->port_num);
+ else
+ rte_eth_allmulticast_disable(res->port_num);
+ }
+}
+
+cmdline_parse_token_string_t cmd_setallmulti_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, set, "set");
+cmdline_parse_token_string_t cmd_setallmulti_allmulti =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, allmulti,
+ "allmulti");
+cmdline_parse_token_string_t cmd_setallmulti_portall =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, port_all,
+ "all");
+cmdline_parse_token_num_t cmd_setallmulti_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_allmulti_mode_result, port_num,
+ UINT8);
+cmdline_parse_token_string_t cmd_setallmulti_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, mode,
+ "on#off");
+
+/* "all ports" variant: .data = 1 selects the all-ports branch above. */
+cmdline_parse_inst_t cmd_set_allmulti_mode_all = {
+ .f = cmd_set_allmulti_mode_parsed,
+ .data = (void *)1,
+ .help_str = "set allmulti all on|off: set allmulti mode for all ports",
+ .tokens = {
+ (void *)&cmd_setallmulti_set,
+ (void *)&cmd_setallmulti_allmulti,
+ (void *)&cmd_setallmulti_portall,
+ (void *)&cmd_setallmulti_mode,
+ NULL,
+ },
+};
+
+/* single-port variant: .data = 0 selects the per-port branch above. */
+cmdline_parse_inst_t cmd_set_allmulti_mode_one = {
+ .f = cmd_set_allmulti_mode_parsed,
+ .data = (void *)0,
+ .help_str = "set allmulti X on|off: set allmulti mode on port X",
+ .tokens = {
+ (void *)&cmd_setallmulti_set,
+ (void *)&cmd_setallmulti_allmulti,
+ (void *)&cmd_setallmulti_portnum,
+ (void *)&cmd_setallmulti_mode,
+ NULL,
+ },
+};
+
+/* *** SETUP ETHERNET LINK FLOW CONTROL *** */
+/* Token layout shared by the full "set flow_ctrl ..." command and all of
+ * its single-parameter variants; each variant only parses a subset of
+ * these fields, the rest are left untouched. */
+struct cmd_link_flow_ctrl_set_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t flow_ctrl;
+ cmdline_fixed_string_t rx;
+ cmdline_fixed_string_t rx_lfc_mode;
+ cmdline_fixed_string_t tx;
+ cmdline_fixed_string_t tx_lfc_mode;
+ cmdline_fixed_string_t mac_ctrl_frame_fwd;
+ cmdline_fixed_string_t mac_ctrl_frame_fwd_mode;
+ cmdline_fixed_string_t autoneg_str;
+ cmdline_fixed_string_t autoneg;
+ cmdline_fixed_string_t hw_str;
+ uint32_t high_water;
+ cmdline_fixed_string_t lw_str;
+ uint32_t low_water;
+ cmdline_fixed_string_t pt_str;
+ uint16_t pause_time;
+ cmdline_fixed_string_t xon_str;
+ uint16_t send_xon;
+ uint8_t port_id;
+};
+
+/* cmdline tokens reused by the full command and every partial variant. */
+cmdline_parse_token_string_t cmd_lfc_set_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_lfc_set_flow_ctrl =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ flow_ctrl, "flow_ctrl");
+cmdline_parse_token_string_t cmd_lfc_set_rx =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ rx, "rx");
+cmdline_parse_token_string_t cmd_lfc_set_rx_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ rx_lfc_mode, "on#off");
+cmdline_parse_token_string_t cmd_lfc_set_tx =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ tx, "tx");
+cmdline_parse_token_string_t cmd_lfc_set_tx_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ tx_lfc_mode, "on#off");
+cmdline_parse_token_string_t cmd_lfc_set_high_water_str =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ hw_str, "high_water");
+cmdline_parse_token_num_t cmd_lfc_set_high_water =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ high_water, UINT32);
+cmdline_parse_token_string_t cmd_lfc_set_low_water_str =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ lw_str, "low_water");
+cmdline_parse_token_num_t cmd_lfc_set_low_water =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ low_water, UINT32);
+cmdline_parse_token_string_t cmd_lfc_set_pause_time_str =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ pt_str, "pause_time");
+cmdline_parse_token_num_t cmd_lfc_set_pause_time =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ pause_time, UINT16);
+cmdline_parse_token_string_t cmd_lfc_set_send_xon_str =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ xon_str, "send_xon");
+cmdline_parse_token_num_t cmd_lfc_set_send_xon =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ send_xon, UINT16);
+cmdline_parse_token_string_t cmd_lfc_set_mac_ctrl_frame_fwd_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ mac_ctrl_frame_fwd, "mac_ctrl_frame_fwd");
+cmdline_parse_token_string_t cmd_lfc_set_mac_ctrl_frame_fwd =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ mac_ctrl_frame_fwd_mode, "on#off");
+cmdline_parse_token_string_t cmd_lfc_set_autoneg_str =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ autoneg_str, "autoneg");
+cmdline_parse_token_string_t cmd_lfc_set_autoneg =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ autoneg, "on#off");
+cmdline_parse_token_num_t cmd_lfc_set_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+ port_id, UINT8);
+
+/* forward declaration */
+static void
+cmd_link_flow_ctrl_set_parsed(void *parsed_result, struct cmdline *cl,
+ void *data);
+
+/* Full command: every flow-control field is given on one line.
+ * .data is NULL, which tells the shared handler to apply all fields. */
+cmdline_parse_inst_t cmd_link_flow_control_set = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = NULL,
+ .help_str = "Configure the Ethernet flow control: set flow_ctrl rx on|off \
+tx on|off high_water low_water pause_time send_xon mac_ctrl_frame_fwd on|off \
+autoneg on|off port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_rx,
+ (void *)&cmd_lfc_set_rx_mode,
+ (void *)&cmd_lfc_set_tx,
+ (void *)&cmd_lfc_set_tx_mode,
+ (void *)&cmd_lfc_set_high_water,
+ (void *)&cmd_lfc_set_low_water,
+ (void *)&cmd_lfc_set_pause_time,
+ (void *)&cmd_lfc_set_send_xon,
+ (void *)&cmd_lfc_set_mac_ctrl_frame_fwd_mode,
+ (void *)&cmd_lfc_set_mac_ctrl_frame_fwd,
+ (void *)&cmd_lfc_set_autoneg_str,
+ (void *)&cmd_lfc_set_autoneg,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+/* Partial variants below: each inst's .data points at the inst itself so
+ * the shared handler can identify which single field to update on top of
+ * the port's current flow-control configuration. */
+cmdline_parse_inst_t cmd_link_flow_control_set_rx = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_rx,
+ .help_str = "Change rx flow control parameter: set flow_ctrl "
+ "rx on|off port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_rx,
+ (void *)&cmd_lfc_set_rx_mode,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_link_flow_control_set_tx = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_tx,
+ .help_str = "Change tx flow control parameter: set flow_ctrl "
+ "tx on|off port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_tx,
+ (void *)&cmd_lfc_set_tx_mode,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_link_flow_control_set_hw = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_hw,
+ .help_str = "Change high water flow control parameter: set flow_ctrl "
+ "high_water value port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_high_water_str,
+ (void *)&cmd_lfc_set_high_water,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_link_flow_control_set_lw = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_lw,
+ .help_str = "Change low water flow control parameter: set flow_ctrl "
+ "low_water value port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_low_water_str,
+ (void *)&cmd_lfc_set_low_water,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_link_flow_control_set_pt = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_pt,
+ .help_str = "Change pause time flow control parameter: set flow_ctrl "
+ "pause_time value port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_pause_time_str,
+ (void *)&cmd_lfc_set_pause_time,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_link_flow_control_set_xon = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_xon,
+ .help_str = "Change send_xon flow control parameter: set flow_ctrl "
+ "send_xon value port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_send_xon_str,
+ (void *)&cmd_lfc_set_send_xon,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_link_flow_control_set_macfwd = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_macfwd,
+ .help_str = "Change mac ctrl fwd flow control parameter: set flow_ctrl "
+ "mac_ctrl_frame_fwd on|off port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_mac_ctrl_frame_fwd_mode,
+ (void *)&cmd_lfc_set_mac_ctrl_frame_fwd,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_link_flow_control_set_autoneg = {
+ .f = cmd_link_flow_ctrl_set_parsed,
+ .data = (void *)&cmd_link_flow_control_set_autoneg,
+ .help_str = "Change autoneg flow control parameter: set flow_ctrl "
+ "autoneg on|off port_id",
+ .tokens = {
+ (void *)&cmd_lfc_set_set,
+ (void *)&cmd_lfc_set_flow_ctrl,
+ (void *)&cmd_lfc_set_autoneg_str,
+ (void *)&cmd_lfc_set_autoneg,
+ (void *)&cmd_lfc_set_portid,
+ NULL,
+ },
+};
+
+/*
+ * Shared handler for the full "set flow_ctrl ..." command and all of its
+ * single-parameter variants. "data" is NULL for the full command; for a
+ * partial command it points at the matching cmdline_parse_inst_t, in
+ * which case the current configuration is fetched first and only the
+ * field belonging to that variant is overwritten before applying.
+ */
+static void
+cmd_link_flow_ctrl_set_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_flow_ctrl_set_result *res = parsed_result;
+ cmdline_parse_inst_t *cmd = data;
+ struct rte_eth_fc_conf fc_conf;
+ int rx_fc_en = 0;
+ int tx_fc_en = 0;
+ int ret;
+
+ /*
+ * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
+ * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+ * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
+ * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+ */
+ static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
+ {RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+ };
+
+ /* Partial command line, retrieve current configuration */
+ if (cmd) {
+ ret = rte_eth_dev_flow_ctrl_get(res->port_id, &fc_conf);
+ if (ret != 0) {
+ printf("cannot get current flow ctrl parameters, return "
+ "code = %d\n", ret);
+ return;
+ }
+
+ /* Decompose the current mode into rx/tx enable flags. */
+ if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
+ (fc_conf.mode == RTE_FC_FULL))
+ rx_fc_en = 1;
+ if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
+ (fc_conf.mode == RTE_FC_FULL))
+ tx_fc_en = 1;
+ }
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_rx)
+ rx_fc_en = (!strcmp(res->rx_lfc_mode, "on")) ? 1 : 0;
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_tx)
+ tx_fc_en = (!strcmp(res->tx_lfc_mode, "on")) ? 1 : 0;
+
+ fc_conf.mode = rx_tx_onoff_2_lfc_mode[rx_fc_en][tx_fc_en];
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_hw)
+ fc_conf.high_water = res->high_water;
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_lw)
+ fc_conf.low_water = res->low_water;
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_pt)
+ fc_conf.pause_time = res->pause_time;
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_xon)
+ fc_conf.send_xon = res->send_xon;
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_macfwd) {
+ if (!strcmp(res->mac_ctrl_frame_fwd_mode, "on"))
+ fc_conf.mac_ctrl_frame_fwd = 1;
+ else
+ fc_conf.mac_ctrl_frame_fwd = 0;
+ }
+
+ if (!cmd || cmd == &cmd_link_flow_control_set_autoneg)
+ fc_conf.autoneg = (!strcmp(res->autoneg, "on")) ? 1 : 0;
+
+ ret = rte_eth_dev_flow_ctrl_set(res->port_id, &fc_conf);
+ if (ret != 0)
+ printf("bad flow control parameter, return code = %d\n", ret);
+}
+
+/* *** SETUP ETHERNET PRIORITY FLOW CONTROL *** */
+/* Parse result for:
+ * "set pfc_ctrl rx on|off tx on|off high_water low_water pause_time
+ *  priority port_id"
+ */
+struct cmd_priority_flow_ctrl_set_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t pfc_ctrl;
+ cmdline_fixed_string_t rx;
+ cmdline_fixed_string_t rx_pfc_mode;
+ cmdline_fixed_string_t tx;
+ cmdline_fixed_string_t tx_pfc_mode;
+ uint32_t high_water;
+ uint32_t low_water;
+ uint16_t pause_time;
+ uint8_t priority;
+ uint8_t port_id;
+};
+
+/*
+ * "set pfc_ctrl ..." handler: program priority flow control on one port
+ * from a fully specified command line.
+ */
+static void
+cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_priority_flow_ctrl_set_result *res = parsed_result;
+ struct rte_eth_pfc_conf pfc_conf;
+ int rx_fc_enable, tx_fc_enable;
+ int ret;
+
+ /*
+ * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
+ * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+ * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
+ * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+ * NOTE(review): this table is transposed relative to the link-level
+ * rx_tx_onoff_2_lfc_mode table above -- confirm whether that asymmetry
+ * is intentional.
+ */
+ static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
+ {RTE_FC_NONE, RTE_FC_RX_PAUSE}, {RTE_FC_TX_PAUSE, RTE_FC_FULL}
+ };
+
+ /* Zero the whole config first: only a subset of the fields is filled
+ * in below, and the struct otherwise reaches the PMD with
+ * uninitialized stack contents. */
+ memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
+
+ rx_fc_enable = (!strncmp(res->rx_pfc_mode, "on",2)) ? 1 : 0;
+ tx_fc_enable = (!strncmp(res->tx_pfc_mode, "on",2)) ? 1 : 0;
+ pfc_conf.fc.mode = rx_tx_onoff_2_pfc_mode[rx_fc_enable][tx_fc_enable];
+ pfc_conf.fc.high_water = res->high_water;
+ pfc_conf.fc.low_water = res->low_water;
+ pfc_conf.fc.pause_time = res->pause_time;
+ pfc_conf.priority = res->priority;
+
+ ret = rte_eth_dev_priority_flow_ctrl_set(res->port_id, &pfc_conf);
+ if (ret != 0)
+ printf("bad priority flow control parameter, return code = %d \n", ret);
+}
+
+/* Token table for "set pfc_ctrl ...": fixed keywords plus the numeric
+ * water marks, pause time, priority and target port. */
+cmdline_parse_token_string_t cmd_pfc_set_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_pfc_set_flow_ctrl =
+ TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ pfc_ctrl, "pfc_ctrl");
+cmdline_parse_token_string_t cmd_pfc_set_rx =
+ TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ rx, "rx");
+cmdline_parse_token_string_t cmd_pfc_set_rx_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ rx_pfc_mode, "on#off");
+cmdline_parse_token_string_t cmd_pfc_set_tx =
+ TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ tx, "tx");
+cmdline_parse_token_string_t cmd_pfc_set_tx_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ tx_pfc_mode, "on#off");
+cmdline_parse_token_num_t cmd_pfc_set_high_water =
+ TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ high_water, UINT32);
+cmdline_parse_token_num_t cmd_pfc_set_low_water =
+ TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ low_water, UINT32);
+cmdline_parse_token_num_t cmd_pfc_set_pause_time =
+ TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ pause_time, UINT16);
+cmdline_parse_token_num_t cmd_pfc_set_priority =
+ TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ priority, UINT8);
+cmdline_parse_token_num_t cmd_pfc_set_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
+ port_id, UINT8);
+
+/* Command registration for "set pfc_ctrl ...". */
+cmdline_parse_inst_t cmd_priority_flow_control_set = {
+ .f = cmd_priority_flow_ctrl_set_parsed,
+ .data = NULL,
+ .help_str = "Configure the Ethernet priority flow control: set pfc_ctrl rx on|off\n\
+ tx on|off high_water low_water pause_time priority port_id",
+ .tokens = {
+ (void *)&cmd_pfc_set_set,
+ (void *)&cmd_pfc_set_flow_ctrl,
+ (void *)&cmd_pfc_set_rx,
+ (void *)&cmd_pfc_set_rx_mode,
+ (void *)&cmd_pfc_set_tx,
+ (void *)&cmd_pfc_set_tx_mode,
+ (void *)&cmd_pfc_set_high_water,
+ (void *)&cmd_pfc_set_low_water,
+ (void *)&cmd_pfc_set_pause_time,
+ (void *)&cmd_pfc_set_priority,
+ (void *)&cmd_pfc_set_portid,
+ NULL,
+ },
+};
+
+/* *** RESET CONFIGURATION *** */
+/* Parse result for "set default" (field names differ from the keywords:
+ * "reset" matches the literal "set", "def" matches "default"). */
+struct cmd_reset_result {
+ cmdline_fixed_string_t reset;
+ cmdline_fixed_string_t def;
+};
+
+/* Handler: restore the default forwarding configuration. */
+static void cmd_reset_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ cmdline_printf(cl, "Reset to default forwarding configuration...\n");
+ set_def_fwd_config();
+}
+
+cmdline_parse_token_string_t cmd_reset_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_reset_result, reset, "set");
+cmdline_parse_token_string_t cmd_reset_def =
+ TOKEN_STRING_INITIALIZER(struct cmd_reset_result, def,
+ "default");
+
+/* Command registration for "set default". */
+cmdline_parse_inst_t cmd_reset = {
+ .f = cmd_reset_parsed,
+ .data = NULL,
+ .help_str = "set default: reset default forwarding configuration",
+ .tokens = {
+ (void *)&cmd_reset_set,
+ (void *)&cmd_reset_def,
+ NULL,
+ },
+};
+
+/* *** START FORWARDING *** */
+struct cmd_start_result {
+ cmdline_fixed_string_t start;
+};
+
+cmdline_parse_token_string_t cmd_start_start =
+ TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start");
+
+/* "start" handler: begin packet forwarding; argument 0 = no initial
+ * TX burst (contrast with "start tx_first" below). */
+static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ start_packet_forwarding(0);
+}
+
+cmdline_parse_inst_t cmd_start = {
+ .f = cmd_start_parsed,
+ .data = NULL,
+ .help_str = "start packet forwarding",
+ .tokens = {
+ (void *)&cmd_start_start,
+ NULL,
+ },
+};
+
+/* *** START FORWARDING WITH ONE TX BURST FIRST *** */
+struct cmd_start_tx_first_result {
+ cmdline_fixed_string_t start;
+ cmdline_fixed_string_t tx_first;
+};
+
+/* "start tx_first" handler: argument 1 requests one TX burst before
+ * forwarding starts (useful to prime loopback setups). */
+static void
+cmd_start_tx_first_parsed(__attribute__((unused)) void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ start_packet_forwarding(1);
+}
+
+cmdline_parse_token_string_t cmd_start_tx_first_start =
+ TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result, start,
+ "start");
+cmdline_parse_token_string_t cmd_start_tx_first_tx_first =
+ TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result,
+ tx_first, "tx_first");
+
+cmdline_parse_inst_t cmd_start_tx_first = {
+ .f = cmd_start_tx_first_parsed,
+ .data = NULL,
+ .help_str = "start packet forwarding, after sending 1 burst of packets",
+ .tokens = {
+ (void *)&cmd_start_tx_first_start,
+ (void *)&cmd_start_tx_first_tx_first,
+ NULL,
+ },
+};
+
+/* *** SET LINK UP *** */
+/* Parse result and tokens for "set link-up port <port_id>". */
+struct cmd_set_link_up_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t link_up;
+ cmdline_fixed_string_t port;
+ uint8_t port_id;
+};
+
+cmdline_parse_token_string_t cmd_set_link_up_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_up_result, set, "set");
+cmdline_parse_token_string_t cmd_set_link_up_link_up =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_up_result, link_up,
+ "link-up");
+cmdline_parse_token_string_t cmd_set_link_up_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_up_result, port, "port");
+cmdline_parse_token_num_t cmd_set_link_up_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_link_up_result, port_id, UINT8);
+
+/* "set link-up port <id>" handler: bring the given port's link up.
+ * Note: parsed_result was previously annotated __attribute__((unused))
+ * although it is dereferenced below; the misleading attribute is removed. */
+static void cmd_set_link_up_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_link_up_result *res = parsed_result;
+ dev_set_link_up(res->port_id);
+}
+
+/* Command registration for "set link-up port <port_id>". */
+cmdline_parse_inst_t cmd_set_link_up = {
+ .f = cmd_set_link_up_parsed,
+ .data = NULL,
+ .help_str = "set link-up port (port id)",
+ .tokens = {
+ (void *)&cmd_set_link_up_set,
+ (void *)&cmd_set_link_up_link_up,
+ (void *)&cmd_set_link_up_port,
+ (void *)&cmd_set_link_up_port_id,
+ NULL,
+ },
+};
+
+/* *** SET LINK DOWN *** */
+/* Parse result and tokens for "set link-down port <port_id>". */
+struct cmd_set_link_down_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t link_down;
+ cmdline_fixed_string_t port;
+ uint8_t port_id;
+};
+
+cmdline_parse_token_string_t cmd_set_link_down_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_down_result, set, "set");
+cmdline_parse_token_string_t cmd_set_link_down_link_down =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_down_result, link_down,
+ "link-down");
+cmdline_parse_token_string_t cmd_set_link_down_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_link_down_result, port, "port");
+cmdline_parse_token_num_t cmd_set_link_down_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_link_down_result, port_id, UINT8);
+
+/* "set link-down port <id>" handler: bring the given port's link down.
+ * Note: parsed_result was previously annotated __attribute__((unused))
+ * although it is dereferenced below; the misleading attribute is removed. */
+static void cmd_set_link_down_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_link_down_result *res = parsed_result;
+ dev_set_link_down(res->port_id);
+}
+
+/* Command registration for "set link-down port <port_id>". */
+cmdline_parse_inst_t cmd_set_link_down = {
+ .f = cmd_set_link_down_parsed,
+ .data = NULL,
+ .help_str = "set link-down port (port id)",
+ .tokens = {
+ (void *)&cmd_set_link_down_set,
+ (void *)&cmd_set_link_down_link_down,
+ (void *)&cmd_set_link_down_port,
+ (void *)&cmd_set_link_down_port_id,
+ NULL,
+ },
+};
+
+/* *** SHOW CFG *** */
+/* Parse result for "show config rxtx|cores|fwd|txpkts". */
+struct cmd_showcfg_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t cfg;
+ cmdline_fixed_string_t what;
+};
+
+/* Dispatch on the "what" keyword to the matching display helper.
+ * An unrecognized keyword cannot reach here: the token below restricts
+ * input to the four listed values. */
+static void cmd_showcfg_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_showcfg_result *res = parsed_result;
+ if (!strcmp(res->what, "rxtx"))
+ rxtx_config_display();
+ else if (!strcmp(res->what, "cores"))
+ fwd_lcores_config_display();
+ else if (!strcmp(res->what, "fwd"))
+ fwd_config_display();
+ else if (!strcmp(res->what, "txpkts"))
+ show_tx_pkt_segments();
+}
+
+cmdline_parse_token_string_t cmd_showcfg_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, show, "show");
+cmdline_parse_token_string_t cmd_showcfg_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, cfg, "config");
+cmdline_parse_token_string_t cmd_showcfg_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, what,
+ "rxtx#cores#fwd#txpkts");
+
+cmdline_parse_inst_t cmd_showcfg = {
+ .f = cmd_showcfg_parsed,
+ .data = NULL,
+ .help_str = "show config rxtx|cores|fwd|txpkts",
+ .tokens = {
+ (void *)&cmd_showcfg_show,
+ (void *)&cmd_showcfg_port,
+ (void *)&cmd_showcfg_what,
+ NULL,
+ },
+};
+
+/* *** SHOW ALL PORT INFO *** */
+/* Parse result for "show|clear port <what> all". */
+struct cmd_showportall_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t what;
+ cmdline_fixed_string_t all;
+};
+
+/* Apply the requested display/clear operation to every known port.
+ * The first token is "show" or "clear"; only stats/xstats can be
+ * cleared, everything else is display-only.  Each FOREACH_PORT line
+ * together with its call forms a single statement, so the unbraced
+ * else-if chain below is well-formed. */
+static void cmd_showportall_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ portid_t i;
+
+ struct cmd_showportall_result *res = parsed_result;
+ if (!strcmp(res->show, "clear")) {
+ if (!strcmp(res->what, "stats"))
+ FOREACH_PORT(i, ports)
+ nic_stats_clear(i);
+ else if (!strcmp(res->what, "xstats"))
+ FOREACH_PORT(i, ports)
+ nic_xstats_clear(i);
+ } else if (!strcmp(res->what, "info"))
+ FOREACH_PORT(i, ports)
+ port_infos_display(i);
+ else if (!strcmp(res->what, "stats"))
+ FOREACH_PORT(i, ports)
+ nic_stats_display(i);
+ else if (!strcmp(res->what, "xstats"))
+ FOREACH_PORT(i, ports)
+ nic_xstats_display(i);
+ else if (!strcmp(res->what, "fdir"))
+ FOREACH_PORT(i, ports)
+ fdir_get_infos(i);
+ else if (!strcmp(res->what, "stat_qmap"))
+ FOREACH_PORT(i, ports)
+ nic_stats_mapping_display(i);
+ else if (!strcmp(res->what, "dcb_tc"))
+ FOREACH_PORT(i, ports)
+ port_dcb_info_display(i);
+}
+
+cmdline_parse_token_string_t cmd_showportall_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, show,
+ "show#clear");
+cmdline_parse_token_string_t cmd_showportall_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, port, "port");
+cmdline_parse_token_string_t cmd_showportall_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, what,
+ "info#stats#xstats#fdir#stat_qmap#dcb_tc");
+cmdline_parse_token_string_t cmd_showportall_all =
+ TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, all, "all");
+cmdline_parse_inst_t cmd_showportall = {
+ .f = cmd_showportall_parsed,
+ .data = NULL,
+ .help_str = "show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc all",
+ .tokens = {
+ (void *)&cmd_showportall_show,
+ (void *)&cmd_showportall_port,
+ (void *)&cmd_showportall_what,
+ (void *)&cmd_showportall_all,
+ NULL,
+ },
+};
+
+/* *** SHOW PORT INFO *** */
+/* Parse result for "show|clear port <what> <portnum>". */
+struct cmd_showport_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t what;
+ uint8_t portnum;
+};
+
+/* Single-port variant of cmd_showportall_parsed: same dispatch, one port. */
+static void cmd_showport_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_showport_result *res = parsed_result;
+ if (!strcmp(res->show, "clear")) {
+ if (!strcmp(res->what, "stats"))
+ nic_stats_clear(res->portnum);
+ else if (!strcmp(res->what, "xstats"))
+ nic_xstats_clear(res->portnum);
+ } else if (!strcmp(res->what, "info"))
+ port_infos_display(res->portnum);
+ else if (!strcmp(res->what, "stats"))
+ nic_stats_display(res->portnum);
+ else if (!strcmp(res->what, "xstats"))
+ nic_xstats_display(res->portnum);
+ else if (!strcmp(res->what, "fdir"))
+ fdir_get_infos(res->portnum);
+ else if (!strcmp(res->what, "stat_qmap"))
+ nic_stats_mapping_display(res->portnum);
+ else if (!strcmp(res->what, "dcb_tc"))
+ port_dcb_info_display(res->portnum);
+}
+
+cmdline_parse_token_string_t cmd_showport_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_showport_result, show,
+ "show#clear");
+cmdline_parse_token_string_t cmd_showport_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_showport_result, port, "port");
+cmdline_parse_token_string_t cmd_showport_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what,
+ "info#stats#xstats#fdir#stat_qmap#dcb_tc");
+cmdline_parse_token_num_t cmd_showport_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, UINT8);
+
+cmdline_parse_inst_t cmd_showport = {
+ .f = cmd_showport_parsed,
+ .data = NULL,
+ .help_str = "show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc X (X = port number)",
+ .tokens = {
+ (void *)&cmd_showport_show,
+ (void *)&cmd_showport_port,
+ (void *)&cmd_showport_what,
+ (void *)&cmd_showport_portnum,
+ NULL,
+ },
+};
+
+/* *** SHOW QUEUE INFO *** */
+/* Parse result for "show rxq|txq info <port> <queue>". */
+struct cmd_showqueue_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t type;
+ cmdline_fixed_string_t what;
+ uint8_t portnum;
+ uint16_t queuenum;
+};
+
+/* Display RX or TX queue information depending on the "type" keyword. */
+static void
+cmd_showqueue_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_showqueue_result *res = parsed_result;
+
+ if (!strcmp(res->type, "rxq"))
+ rx_queue_infos_display(res->portnum, res->queuenum);
+ else if (!strcmp(res->type, "txq"))
+ tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+ TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+ TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+ .f = cmd_showqueue_parsed,
+ .data = NULL,
+ .help_str = "show rxq|txq info <port number> <queue_number>",
+ .tokens = {
+ (void *)&cmd_showqueue_show,
+ (void *)&cmd_showqueue_type,
+ (void *)&cmd_showqueue_what,
+ (void *)&cmd_showqueue_portnum,
+ (void *)&cmd_showqueue_queuenum,
+ NULL,
+ },
+};
+
+/* *** READ PORT REGISTER *** */
+/* Parse result for "read reg <port_id> <reg_off>". */
+struct cmd_read_reg_result {
+ cmdline_fixed_string_t read;
+ cmdline_fixed_string_t reg;
+ uint8_t port_id;
+ uint32_t reg_off;
+};
+
+/* Display the 32-bit device register at the given byte offset. */
+static void
+cmd_read_reg_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_read_reg_result *res = parsed_result;
+ port_reg_display(res->port_id, res->reg_off);
+}
+
+cmdline_parse_token_string_t cmd_read_reg_read =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, read, "read");
+cmdline_parse_token_string_t cmd_read_reg_reg =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, reg, "reg");
+cmdline_parse_token_num_t cmd_read_reg_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_read_reg_reg_off =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, reg_off, UINT32);
+
+cmdline_parse_inst_t cmd_read_reg = {
+ .f = cmd_read_reg_parsed,
+ .data = NULL,
+ .help_str = "read reg port_id reg_off",
+ .tokens = {
+ (void *)&cmd_read_reg_read,
+ (void *)&cmd_read_reg_reg,
+ (void *)&cmd_read_reg_port_id,
+ (void *)&cmd_read_reg_reg_off,
+ NULL,
+ },
+};
+
+/* *** READ PORT REGISTER BIT FIELD *** */
+/* Parse result for "read regfield <port_id> <reg_off> <bit_x> <bit_y>". */
+struct cmd_read_reg_bit_field_result {
+ cmdline_fixed_string_t read;
+ cmdline_fixed_string_t regfield;
+ uint8_t port_id;
+ uint32_t reg_off;
+ uint8_t bit1_pos;
+ uint8_t bit2_pos;
+};
+
+/* Display the register bit field between bit1_pos and bit2_pos inclusive. */
+static void
+cmd_read_reg_bit_field_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_read_reg_bit_field_result *res = parsed_result;
+ port_reg_bit_field_display(res->port_id, res->reg_off,
+ res->bit1_pos, res->bit2_pos);
+}
+
+cmdline_parse_token_string_t cmd_read_reg_bit_field_read =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result, read,
+ "read");
+cmdline_parse_token_string_t cmd_read_reg_bit_field_regfield =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result,
+ regfield, "regfield");
+cmdline_parse_token_num_t cmd_read_reg_bit_field_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, port_id,
+ UINT8);
+cmdline_parse_token_num_t cmd_read_reg_bit_field_reg_off =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, reg_off,
+ UINT32);
+cmdline_parse_token_num_t cmd_read_reg_bit_field_bit1_pos =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit1_pos,
+ UINT8);
+cmdline_parse_token_num_t cmd_read_reg_bit_field_bit2_pos =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit2_pos,
+ UINT8);
+
+cmdline_parse_inst_t cmd_read_reg_bit_field = {
+ .f = cmd_read_reg_bit_field_parsed,
+ .data = NULL,
+ .help_str = "read regfield port_id reg_off bit_x bit_y "
+ "(read register bit field between bit_x and bit_y included)",
+ .tokens = {
+ (void *)&cmd_read_reg_bit_field_read,
+ (void *)&cmd_read_reg_bit_field_regfield,
+ (void *)&cmd_read_reg_bit_field_port_id,
+ (void *)&cmd_read_reg_bit_field_reg_off,
+ (void *)&cmd_read_reg_bit_field_bit1_pos,
+ (void *)&cmd_read_reg_bit_field_bit2_pos,
+ NULL,
+ },
+};
+
+/* *** READ PORT REGISTER BIT *** */
+/* Parse result for "read regbit <port_id> <reg_off> <bit_pos>". */
+struct cmd_read_reg_bit_result {
+ cmdline_fixed_string_t read;
+ cmdline_fixed_string_t regbit;
+ uint8_t port_id;
+ uint32_t reg_off;
+ uint8_t bit_pos;
+};
+
+/* Display a single register bit (0 <= bit_pos <= 31). */
+static void
+cmd_read_reg_bit_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_read_reg_bit_result *res = parsed_result;
+ port_reg_bit_display(res->port_id, res->reg_off, res->bit_pos);
+}
+
+cmdline_parse_token_string_t cmd_read_reg_bit_read =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result, read, "read");
+cmdline_parse_token_string_t cmd_read_reg_bit_regbit =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result,
+ regbit, "regbit");
+cmdline_parse_token_num_t cmd_read_reg_bit_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_read_reg_bit_reg_off =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, reg_off, UINT32);
+cmdline_parse_token_num_t cmd_read_reg_bit_bit_pos =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, bit_pos, UINT8);
+
+cmdline_parse_inst_t cmd_read_reg_bit = {
+ .f = cmd_read_reg_bit_parsed,
+ .data = NULL,
+ .help_str = "read regbit port_id reg_off bit_x (0 <= bit_x <= 31)",
+ .tokens = {
+ (void *)&cmd_read_reg_bit_read,
+ (void *)&cmd_read_reg_bit_regbit,
+ (void *)&cmd_read_reg_bit_port_id,
+ (void *)&cmd_read_reg_bit_reg_off,
+ (void *)&cmd_read_reg_bit_bit_pos,
+ NULL,
+ },
+};
+
+/* *** WRITE PORT REGISTER *** */
+/* Parse result for "write reg <port_id> <reg_off> <value>". */
+struct cmd_write_reg_result {
+ cmdline_fixed_string_t write;
+ cmdline_fixed_string_t reg;
+ uint8_t port_id;
+ uint32_t reg_off;
+ uint32_t value;
+};
+
+/* Write a 32-bit value to the device register at the given offset. */
+static void
+cmd_write_reg_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_write_reg_result *res = parsed_result;
+ port_reg_set(res->port_id, res->reg_off, res->value);
+}
+
+cmdline_parse_token_string_t cmd_write_reg_write =
+ TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, write, "write");
+cmdline_parse_token_string_t cmd_write_reg_reg =
+ TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, reg, "reg");
+cmdline_parse_token_num_t cmd_write_reg_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_write_reg_reg_off =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, reg_off, UINT32);
+cmdline_parse_token_num_t cmd_write_reg_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, value, UINT32);
+
+cmdline_parse_inst_t cmd_write_reg = {
+ .f = cmd_write_reg_parsed,
+ .data = NULL,
+ .help_str = "write reg port_id reg_off reg_value",
+ .tokens = {
+ (void *)&cmd_write_reg_write,
+ (void *)&cmd_write_reg_reg,
+ (void *)&cmd_write_reg_port_id,
+ (void *)&cmd_write_reg_reg_off,
+ (void *)&cmd_write_reg_value,
+ NULL,
+ },
+};
+
+/* *** WRITE PORT REGISTER BIT FIELD *** */
+/* Parse result for "write regfield <port_id> <reg_off> <bit_x> <bit_y> <value>". */
+struct cmd_write_reg_bit_field_result {
+ cmdline_fixed_string_t write;
+ cmdline_fixed_string_t regfield;
+ uint8_t port_id;
+ uint32_t reg_off;
+ uint8_t bit1_pos;
+ uint8_t bit2_pos;
+ uint32_t value;
+};
+
+/* Set the register bit field between bit1_pos and bit2_pos inclusive. */
+static void
+cmd_write_reg_bit_field_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_write_reg_bit_field_result *res = parsed_result;
+ port_reg_bit_field_set(res->port_id, res->reg_off,
+ res->bit1_pos, res->bit2_pos, res->value);
+}
+
+cmdline_parse_token_string_t cmd_write_reg_bit_field_write =
+ TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result, write,
+ "write");
+cmdline_parse_token_string_t cmd_write_reg_bit_field_regfield =
+ TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result,
+ regfield, "regfield");
+cmdline_parse_token_num_t cmd_write_reg_bit_field_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, port_id,
+ UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_reg_off =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, reg_off,
+ UINT32);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_bit1_pos =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit1_pos,
+ UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_bit2_pos =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit2_pos,
+ UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, value,
+ UINT32);
+
+/* Command registration for "write regfield ...".  The help string pieces
+ * previously concatenated into "reg_value(set ..."; a separating space is
+ * added. */
+cmdline_parse_inst_t cmd_write_reg_bit_field = {
+ .f = cmd_write_reg_bit_field_parsed,
+ .data = NULL,
+ .help_str = "write regfield port_id reg_off bit_x bit_y reg_value "
+ "(set register bit field between bit_x and bit_y included)",
+ .tokens = {
+ (void *)&cmd_write_reg_bit_field_write,
+ (void *)&cmd_write_reg_bit_field_regfield,
+ (void *)&cmd_write_reg_bit_field_port_id,
+ (void *)&cmd_write_reg_bit_field_reg_off,
+ (void *)&cmd_write_reg_bit_field_bit1_pos,
+ (void *)&cmd_write_reg_bit_field_bit2_pos,
+ (void *)&cmd_write_reg_bit_field_value,
+ NULL,
+ },
+};
+
+/* *** WRITE PORT REGISTER BIT *** */
+/* Parse result for "write regbit <port_id> <reg_off> <bit_pos> <0|1>". */
+struct cmd_write_reg_bit_result {
+ cmdline_fixed_string_t write;
+ cmdline_fixed_string_t regbit;
+ uint8_t port_id;
+ uint32_t reg_off;
+ uint8_t bit_pos;
+ uint8_t value;
+};
+
+/* Set or clear a single register bit (0 <= bit_pos <= 31). */
+static void
+cmd_write_reg_bit_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_write_reg_bit_result *res = parsed_result;
+ port_reg_bit_set(res->port_id, res->reg_off, res->bit_pos, res->value);
+}
+
+cmdline_parse_token_string_t cmd_write_reg_bit_write =
+ TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result, write,
+ "write");
+cmdline_parse_token_string_t cmd_write_reg_bit_regbit =
+ TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result,
+ regbit, "regbit");
+cmdline_parse_token_num_t cmd_write_reg_bit_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_reg_off =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, reg_off, UINT32);
+cmdline_parse_token_num_t cmd_write_reg_bit_bit_pos =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, bit_pos, UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, value, UINT8);
+
+cmdline_parse_inst_t cmd_write_reg_bit = {
+ .f = cmd_write_reg_bit_parsed,
+ .data = NULL,
+ .help_str = "write regbit port_id reg_off bit_x 0/1 (0 <= bit_x <= 31)",
+ .tokens = {
+ (void *)&cmd_write_reg_bit_write,
+ (void *)&cmd_write_reg_bit_regbit,
+ (void *)&cmd_write_reg_bit_port_id,
+ (void *)&cmd_write_reg_bit_reg_off,
+ (void *)&cmd_write_reg_bit_bit_pos,
+ (void *)&cmd_write_reg_bit_value,
+ NULL,
+ },
+};
+
+/* *** READ A RING DESCRIPTOR OF A PORT RX/TX QUEUE *** */
+/* Parse result for "read rxd|txd <port_id> <queue_id> <desc_id>". */
+struct cmd_read_rxd_txd_result {
+ cmdline_fixed_string_t read;
+ cmdline_fixed_string_t rxd_txd;
+ uint8_t port_id;
+ uint16_t queue_id;
+ uint16_t desc_id;
+};
+
+/* Dump one RX or TX ring descriptor depending on the keyword. */
+static void
+cmd_read_rxd_txd_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_read_rxd_txd_result *res = parsed_result;
+
+ if (!strcmp(res->rxd_txd, "rxd"))
+ rx_ring_desc_display(res->port_id, res->queue_id, res->desc_id);
+ else if (!strcmp(res->rxd_txd, "txd"))
+ tx_ring_desc_display(res->port_id, res->queue_id, res->desc_id);
+}
+
+cmdline_parse_token_string_t cmd_read_rxd_txd_read =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, read, "read");
+cmdline_parse_token_string_t cmd_read_rxd_txd_rxd_txd =
+ TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, rxd_txd,
+ "rxd#txd");
+cmdline_parse_token_num_t cmd_read_rxd_txd_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_read_rxd_txd_queue_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, queue_id, UINT16);
+cmdline_parse_token_num_t cmd_read_rxd_txd_desc_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, desc_id, UINT16);
+
+cmdline_parse_inst_t cmd_read_rxd_txd = {
+ .f = cmd_read_rxd_txd_parsed,
+ .data = NULL,
+ .help_str = "read rxd|txd port_id queue_id rxd_id",
+ .tokens = {
+ (void *)&cmd_read_rxd_txd_read,
+ (void *)&cmd_read_rxd_txd_rxd_txd,
+ (void *)&cmd_read_rxd_txd_port_id,
+ (void *)&cmd_read_rxd_txd_queue_id,
+ (void *)&cmd_read_rxd_txd_desc_id,
+ NULL,
+ },
+};
+
+/* *** QUIT *** */
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+/* "quit" handler: release ports, then terminate the command loop. */
+static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ pmd_test_exit();
+ cmdline_quit(cl);
+}
+
+cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed,
+ .data = NULL,
+ .help_str = "exit application",
+ .tokens = {
+ (void *)&cmd_quit_quit,
+ NULL,
+ },
+};
+
+/* *** ADD/REMOVE MAC ADDRESS FROM A PORT *** */
+/* Parse result for "mac_addr add|remove <port> <address>". */
+struct cmd_mac_addr_result {
+ cmdline_fixed_string_t mac_addr_cmd;
+ cmdline_fixed_string_t what;
+ uint8_t port_num;
+ struct ether_addr address;
+};
+
+/* Add or remove a MAC address on a port; reports librte errors as strings. */
+static void cmd_mac_addr_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_mac_addr_result *res = parsed_result;
+ int ret;
+
+ if (strcmp(res->what, "add") == 0)
+ ret = rte_eth_dev_mac_addr_add(res->port_num, &res->address, 0);
+ else
+ ret = rte_eth_dev_mac_addr_remove(res->port_num, &res->address);
+
+ /* check the return value and print it if is < 0 */
+ if(ret < 0)
+ printf("mac_addr_cmd error: (%s)\n", strerror(-ret));
+
+}
+
+cmdline_parse_token_string_t cmd_mac_addr_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, mac_addr_cmd,
+ "mac_addr");
+cmdline_parse_token_string_t cmd_mac_addr_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, what,
+ "add#remove");
+cmdline_parse_token_num_t cmd_mac_addr_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_mac_addr_result, port_num, UINT8);
+cmdline_parse_token_etheraddr_t cmd_mac_addr_addr =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_addr_result, address);
+
+/* Command registration for "mac_addr add|remove ...".  The .data member
+ * is NULL like every other inst in this file (was the equivalent but
+ * inconsistent "(void *)0"). */
+cmdline_parse_inst_t cmd_mac_addr = {
+ .f = cmd_mac_addr_parsed,
+ .data = NULL,
+ .help_str = "mac_addr add|remove X <address>: "
+ "add/remove MAC address on port X",
+ .tokens = {
+ (void *)&cmd_mac_addr_cmd,
+ (void *)&cmd_mac_addr_what,
+ (void *)&cmd_mac_addr_portnum,
+ (void *)&cmd_mac_addr_addr,
+ NULL,
+ },
+};
+
+
+/* *** CONFIGURE QUEUE STATS COUNTER MAPPINGS *** */
+/* Parse result for "set stat_qmap tx|rx <port> <queue> <map_value>". */
+struct cmd_set_qmap_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t qmap;
+ cmdline_fixed_string_t what;
+ uint8_t port_id;
+ uint16_t queue_id;
+ uint8_t map_value;
+};
+
+/* Map a queue to a statistics counter; direction comes from the
+ * tx/rx keyword (anything other than "tx" is treated as rx, but the
+ * token below restricts input to tx#rx). */
+static void
+cmd_set_qmap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_qmap_result *res = parsed_result;
+ int is_rx = (strcmp(res->what, "tx") == 0) ? 0 : 1;
+
+ set_qmap(res->port_id, (uint8_t)is_rx, res->queue_id, res->map_value);
+}
+
+cmdline_parse_token_string_t cmd_setqmap_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_qmap_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setqmap_qmap =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_qmap_result,
+ qmap, "stat_qmap");
+cmdline_parse_token_string_t cmd_setqmap_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_qmap_result,
+ what, "tx#rx");
+cmdline_parse_token_num_t cmd_setqmap_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_setqmap_queueid =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result,
+ queue_id, UINT16);
+cmdline_parse_token_num_t cmd_setqmap_mapvalue =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result,
+ map_value, UINT8);
+
+cmdline_parse_inst_t cmd_set_qmap = {
+ .f = cmd_set_qmap_parsed,
+ .data = NULL,
+ .help_str = "Set statistics mapping value on tx|rx queue_id of port_id",
+ .tokens = {
+ (void *)&cmd_setqmap_set,
+ (void *)&cmd_setqmap_qmap,
+ (void *)&cmd_setqmap_what,
+ (void *)&cmd_setqmap_portid,
+ (void *)&cmd_setqmap_queueid,
+ (void *)&cmd_setqmap_mapvalue,
+ NULL,
+ },
+};
+
+/* *** CONFIGURE UNICAST HASH TABLE *** */
+struct cmd_set_uc_hash_table {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ uint8_t port_id;
+ cmdline_fixed_string_t what;
+ struct ether_addr address;
+ cmdline_fixed_string_t mode;
+};
+
+/*
+ * Handler for "set port <port_id> uta <mac> on|off": program one entry of
+ * the unicast hash table (UTA) for the given port and MAC address.
+ */
+static void
+cmd_set_uc_hash_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_set_uc_hash_table *res = parsed_result;
+	uint8_t enable;
+	int status = 0;
+
+	enable = (uint8_t)(strcmp(res->mode, "on") == 0);
+	if (strcmp(res->what, "uta") == 0)
+		status = rte_eth_dev_uc_hash_table_set(res->port_id,
+						       &res->address, enable);
+	if (status < 0)
+		printf("bad unicast hash table parameter, return code = %d \n",
+		       status);
+}
+
+/* Token list for "set port <port_id> uta <mac> on|off". */
+cmdline_parse_token_string_t cmd_set_uc_hash_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table,
+				 set, "set");
+cmdline_parse_token_string_t cmd_set_uc_hash_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table,
+				 port, "port");
+cmdline_parse_token_num_t cmd_set_uc_hash_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_uc_hash_table,
+			      port_id, UINT8);
+cmdline_parse_token_string_t cmd_set_uc_hash_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table,
+				 what, "uta");
+cmdline_parse_token_etheraddr_t cmd_set_uc_hash_mac =
+	TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_uc_hash_table,
+				address);
+cmdline_parse_token_string_t cmd_set_uc_hash_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table,
+				 mode, "on#off");
+
+/* Command registration for per-address UTA configuration. */
+cmdline_parse_inst_t cmd_set_uc_hash_filter = {
+	.f = cmd_set_uc_hash_parsed,
+	.data = NULL,
+	.help_str = "set port X uta Y on|off(X = port number,Y = MAC address)",
+	.tokens = {
+		(void *)&cmd_set_uc_hash_set,
+		(void *)&cmd_set_uc_hash_port,
+		(void *)&cmd_set_uc_hash_portid,
+		(void *)&cmd_set_uc_hash_what,
+		(void *)&cmd_set_uc_hash_mac,
+		(void *)&cmd_set_uc_hash_mode,
+		NULL,
+	},
+};
+
+/* Parsed result for "set port <port_id> uta all on|off". */
+struct cmd_set_uc_all_hash_table {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint8_t port_id;
+	cmdline_fixed_string_t what;	/* literal "uta" */
+	cmdline_fixed_string_t value;	/* literal "all" */
+	cmdline_fixed_string_t mode;	/* "on" or "off" */
+};
+
+/*
+ * Handler for "set port <port_id> uta all on|off": enable or disable every
+ * entry of the unicast hash table (UTA) on the given port at once.
+ */
+static void
+cmd_set_uc_all_hash_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	int ret = 0;
+	struct cmd_set_uc_all_hash_table *res = parsed_result;
+
+	int is_on = (strcmp(res->mode, "on") == 0) ? 1 : 0;
+
+	if ((strcmp(res->what, "uta") == 0) &&
+	    (strcmp(res->value, "all") == 0))
+		ret = rte_eth_dev_uc_all_hash_table_set(res->port_id,
+							(uint8_t)is_on);
+	if (ret < 0)
+		/* Fix: the two string pieces used to concatenate into
+		 * "parameter,return code" with no space; match the wording
+		 * of the per-address UTA handler above. */
+		printf("bad unicast hash table parameter, "
+		       "return code = %d \n", ret);
+}
+
+/* Token list for "set port <port_id> uta all on|off". */
+cmdline_parse_token_string_t cmd_set_uc_all_hash_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table,
+				 set, "set");
+cmdline_parse_token_string_t cmd_set_uc_all_hash_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table,
+				 port, "port");
+cmdline_parse_token_num_t cmd_set_uc_all_hash_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_uc_all_hash_table,
+			      port_id, UINT8);
+cmdline_parse_token_string_t cmd_set_uc_all_hash_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table,
+				 what, "uta");
+cmdline_parse_token_string_t cmd_set_uc_all_hash_value =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table,
+				value,"all");
+cmdline_parse_token_string_t cmd_set_uc_all_hash_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table,
+				 mode, "on#off");
+
+/* Command registration for whole-table UTA configuration. */
+cmdline_parse_inst_t cmd_set_uc_all_hash_filter = {
+	.f = cmd_set_uc_all_hash_parsed,
+	.data = NULL,
+	.help_str = "set port X uta all on|off (X = port number)",
+	.tokens = {
+		(void *)&cmd_set_uc_all_hash_set,
+		(void *)&cmd_set_uc_all_hash_port,
+		(void *)&cmd_set_uc_all_hash_portid,
+		(void *)&cmd_set_uc_all_hash_what,
+		(void *)&cmd_set_uc_all_hash_value,
+		(void *)&cmd_set_uc_all_hash_mode,
+		NULL,
+	},
+};
+
+/* *** CONFIGURE MACVLAN FILTER FOR VF(s) *** */
+/* Parsed result for "set port <port_id> vf <vf_id> <mac> <type> on|off". */
+struct cmd_set_vf_macvlan_filter {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint8_t port_id;
+	cmdline_fixed_string_t vf;
+	uint8_t vf_id;
+	struct ether_addr address;	/* MAC address to match */
+	cmdline_fixed_string_t filter_type;	/* exact/hash MAC[-VLAN] */
+	cmdline_fixed_string_t mode;	/* "on" adds, "off" deletes */
+};
+
+/*
+ * Handler for "set port <port_id> vf <vf_id> <mac> <filter_type> on|off":
+ * add ("on") or delete ("off") a MAC/VLAN filter steering traffic to a VF.
+ */
+static void
+cmd_set_vf_macvlan_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_set_vf_macvlan_filter *res = parsed_result;
+	struct rte_eth_mac_filter filter;
+	enum rte_filter_op op;
+	int status;
+
+	memset(&filter, 0, sizeof(struct rte_eth_mac_filter));
+	(void)rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
+
+	/* The filter targets a VF, identified by dst_id. */
+	filter.is_vf = 1;
+	filter.dst_id = res->vf_id;
+
+	if (strcmp(res->filter_type, "exact-mac") == 0)
+		filter.filter_type = RTE_MAC_PERFECT_MATCH;
+	else if (strcmp(res->filter_type, "exact-mac-vlan") == 0)
+		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+	else if (strcmp(res->filter_type, "hashmac") == 0)
+		filter.filter_type = RTE_MAC_HASH_MATCH;
+	else if (strcmp(res->filter_type, "hashmac-vlan") == 0)
+		filter.filter_type = RTE_MACVLAN_HASH_MATCH;
+
+	op = (strcmp(res->mode, "on") == 0) ? RTE_ETH_FILTER_ADD :
+					      RTE_ETH_FILTER_DELETE;
+	status = rte_eth_dev_filter_ctrl(res->port_id,
+					 RTE_ETH_FILTER_MACVLAN, op, &filter);
+	if (status < 0)
+		printf("bad set MAC hash parameter, return code = %d\n",
+		       status);
+}
+
+/* Token list for the VF MAC/VLAN filter command. */
+cmdline_parse_token_string_t cmd_set_vf_macvlan_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+				 set, "set");
+cmdline_parse_token_string_t cmd_set_vf_macvlan_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+				 port, "port");
+cmdline_parse_token_num_t cmd_set_vf_macvlan_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+			      port_id, UINT8);
+cmdline_parse_token_string_t cmd_set_vf_macvlan_vf =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+				 vf, "vf");
+cmdline_parse_token_num_t cmd_set_vf_macvlan_vf_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+			      vf_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_set_vf_macvlan_mac =
+	TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+				address);
+cmdline_parse_token_string_t cmd_set_vf_macvlan_filter_type =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+				 filter_type, "exact-mac#exact-mac-vlan"
+				 "#hashmac#hashmac-vlan");
+cmdline_parse_token_string_t cmd_set_vf_macvlan_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter,
+				 mode, "on#off");
+
+/* Command registration for the VF MAC/VLAN filter command. */
+cmdline_parse_inst_t cmd_set_vf_macvlan_filter = {
+	.f = cmd_set_vf_macvlan_parsed,
+	.data = NULL,
+	.help_str = "set port (portid) vf (vfid) (mac-addr) "
+			"(exact-mac|exact-mac-vlan|hashmac|hashmac-vlan) "
+			"on|off\n"
+			"exact match rule:exact match of MAC or MAC and VLAN; "
+			"hash match rule: hash match of MAC and exact match "
+			"of VLAN",
+	.tokens = {
+		(void *)&cmd_set_vf_macvlan_set,
+		(void *)&cmd_set_vf_macvlan_port,
+		(void *)&cmd_set_vf_macvlan_portid,
+		(void *)&cmd_set_vf_macvlan_vf,
+		(void *)&cmd_set_vf_macvlan_vf_id,
+		(void *)&cmd_set_vf_macvlan_mac,
+		(void *)&cmd_set_vf_macvlan_filter_type,
+		(void *)&cmd_set_vf_macvlan_mode,
+		NULL,
+	},
+};
+
+/* *** CONFIGURE VF TRAFFIC CONTROL *** */
+/* Parsed result for "set port <port_id> vf <vf_id> rx|tx on|off". */
+struct cmd_set_vf_traffic {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint8_t port_id;
+	cmdline_fixed_string_t vf;
+	uint8_t vf_id;
+	cmdline_fixed_string_t what;	/* "tx" or "rx" */
+	cmdline_fixed_string_t mode;	/* "on" or "off" */
+};
+
+/*
+ * Handler for "set port <port_id> vf <vf_id> rx|tx on|off": enable or
+ * disable RX or TX traffic for one VF of a port.
+ */
+static void
+cmd_set_vf_traffic_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_set_vf_traffic *res = parsed_result;
+	uint8_t rx_dir = (uint8_t)(strcmp(res->what, "rx") == 0);
+	uint8_t enable = (uint8_t)(strcmp(res->mode, "on") == 0);
+
+	set_vf_traffic(res->port_id, rx_dir, res->vf_id, enable);
+}
+
+/* Token list for "set port <port_id> vf <vf_id> rx|tx on|off". */
+cmdline_parse_token_string_t cmd_setvf_traffic_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic,
+				 set, "set");
+cmdline_parse_token_string_t cmd_setvf_traffic_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic,
+				 port, "port");
+cmdline_parse_token_num_t cmd_setvf_traffic_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_vf_traffic,
+			      port_id, UINT8);
+cmdline_parse_token_string_t cmd_setvf_traffic_vf =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic,
+				 vf, "vf");
+cmdline_parse_token_num_t cmd_setvf_traffic_vfid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_vf_traffic,
+			      vf_id, UINT8);
+cmdline_parse_token_string_t cmd_setvf_traffic_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic,
+				 what, "tx#rx");
+cmdline_parse_token_string_t cmd_setvf_traffic_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic,
+				 mode, "on#off");
+
+/* Command registration for per-VF traffic control. */
+cmdline_parse_inst_t cmd_set_vf_traffic = {
+	.f = cmd_set_vf_traffic_parsed,
+	.data = NULL,
+	.help_str = "set port X vf Y rx|tx on|off"
+			"(X = port number,Y = vf id)",
+	.tokens = {
+		(void *)&cmd_setvf_traffic_set,
+		(void *)&cmd_setvf_traffic_port,
+		(void *)&cmd_setvf_traffic_portid,
+		(void *)&cmd_setvf_traffic_vf,
+		(void *)&cmd_setvf_traffic_vfid,
+		(void *)&cmd_setvf_traffic_what,
+		(void *)&cmd_setvf_traffic_mode,
+		NULL,
+	},
+};
+
+/* *** CONFIGURE VF RECEIVE MODE *** */
+/* Parsed result for "set port <port_id> vf <vf_id> rxmode <mode> on|off". */
+struct cmd_set_vf_rxmode {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint8_t port_id;
+	cmdline_fixed_string_t vf;
+	uint8_t vf_id;
+	cmdline_fixed_string_t what;	/* literal "rxmode" */
+	cmdline_fixed_string_t mode;	/* AUPE, ROPE, BAM or MPE */
+	cmdline_fixed_string_t on;	/* "on" or "off" */
+};
+
+/*
+ * Handler for "set port <port_id> vf <vf_id> rxmode AUPE|ROPE|BAM|MPE
+ * on|off": toggle one VMDq accept flag of a VF's receive mode.
+ */
+static void
+cmd_set_vf_rxmode_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_set_vf_rxmode *res = parsed_result;
+	uint16_t flags = 0;
+	uint8_t enable = (uint8_t)(strcmp(res->on, "on") == 0);
+	int status;
+
+	if (strcmp(res->what, "rxmode") == 0) {
+		if (strcmp(res->mode, "AUPE") == 0)
+			flags |= ETH_VMDQ_ACCEPT_UNTAG;
+		else if (strcmp(res->mode, "ROPE") == 0)
+			flags |= ETH_VMDQ_ACCEPT_HASH_UC;
+		else if (strcmp(res->mode, "BAM") == 0)
+			flags |= ETH_VMDQ_ACCEPT_BROADCAST;
+		else if (strncmp(res->mode, "MPE", 3) == 0)
+			flags |= ETH_VMDQ_ACCEPT_MULTICAST;
+	}
+
+	status = rte_eth_dev_set_vf_rxmode(res->port_id, res->vf_id,
+					   flags, enable);
+	if (status < 0)
+		printf("bad VF receive mode parameter, return code = %d \n",
+			status);
+}
+
+/* Token list for the VF receive-mode command. */
+cmdline_parse_token_string_t cmd_set_vf_rxmode_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode,
+				 set, "set");
+cmdline_parse_token_string_t cmd_set_vf_rxmode_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode,
+				 port, "port");
+cmdline_parse_token_num_t cmd_set_vf_rxmode_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_vf_rxmode,
+			      port_id, UINT8);
+cmdline_parse_token_string_t cmd_set_vf_rxmode_vf =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode,
+				 vf, "vf");
+cmdline_parse_token_num_t cmd_set_vf_rxmode_vfid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_vf_rxmode,
+			      vf_id, UINT8);
+cmdline_parse_token_string_t cmd_set_vf_rxmode_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode,
+				 what, "rxmode");
+cmdline_parse_token_string_t cmd_set_vf_rxmode_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode,
+				 mode, "AUPE#ROPE#BAM#MPE");
+cmdline_parse_token_string_t cmd_set_vf_rxmode_on =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode,
+				 on, "on#off");
+
+/* Command registration for the VF receive-mode command. */
+cmdline_parse_inst_t cmd_set_vf_rxmode = {
+	.f = cmd_set_vf_rxmode_parsed,
+	.data = NULL,
+	.help_str = "set port X vf Y rxmode AUPE|ROPE|BAM|MPE on|off",
+	.tokens = {
+		(void *)&cmd_set_vf_rxmode_set,
+		(void *)&cmd_set_vf_rxmode_port,
+		(void *)&cmd_set_vf_rxmode_portid,
+		(void *)&cmd_set_vf_rxmode_vf,
+		(void *)&cmd_set_vf_rxmode_vfid,
+		(void *)&cmd_set_vf_rxmode_what,
+		(void *)&cmd_set_vf_rxmode_mode,
+		(void *)&cmd_set_vf_rxmode_on,
+		NULL,
+	},
+};
+
+/* *** ADD MAC ADDRESS FILTER FOR A VF OF A PORT *** */
+/* Parsed result for "mac_addr add port <port> vf <vf> <mac>". */
+struct cmd_vf_mac_addr_result {
+	cmdline_fixed_string_t mac_addr_cmd;
+	cmdline_fixed_string_t what;	/* literal "add" */
+	cmdline_fixed_string_t port;
+	uint8_t port_num;
+	cmdline_fixed_string_t vf;
+	uint8_t vf_num;
+	struct ether_addr address;	/* MAC address to assign to the VF */
+};
+
+/*
+ * Handler for "mac_addr add port <port> vf <vf> <mac>": install a MAC
+ * address filter for one VF of a port.
+ */
+static void cmd_vf_mac_addr_parsed(void *parsed_result,
+		__attribute__((unused)) struct cmdline *cl,
+		__attribute__((unused)) void *data)
+{
+	struct cmd_vf_mac_addr_result *res = parsed_result;
+	int status = 0;
+
+	if (!strcmp(res->what, "add"))
+		status = rte_eth_dev_mac_addr_add(res->port_num,
+						  &res->address, res->vf_num);
+	if (status < 0)
+		printf("vf_mac_addr_cmd error: (%s)\n", strerror(-status));
+}
+
+/* Token list for "mac_addr add port <port> vf <vf> <mac>". */
+cmdline_parse_token_string_t cmd_vf_mac_addr_cmd =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result,
+				mac_addr_cmd,"mac_addr");
+cmdline_parse_token_string_t cmd_vf_mac_addr_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result,
+				what,"add");
+cmdline_parse_token_string_t cmd_vf_mac_addr_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result,
+				port,"port");
+cmdline_parse_token_num_t cmd_vf_mac_addr_portnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_mac_addr_result,
+				port_num, UINT8);
+cmdline_parse_token_string_t cmd_vf_mac_addr_vf =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result,
+				vf,"vf");
+cmdline_parse_token_num_t cmd_vf_mac_addr_vfnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_mac_addr_result,
+				vf_num, UINT8);
+cmdline_parse_token_etheraddr_t cmd_vf_mac_addr_addr =
+	TOKEN_ETHERADDR_INITIALIZER(struct cmd_vf_mac_addr_result,
+				address);
+
+/* Command registration for the VF MAC address filter command. */
+cmdline_parse_inst_t cmd_vf_mac_addr_filter = {
+	.f = cmd_vf_mac_addr_parsed,
+	.data = (void *)0,
+	.help_str = "mac_addr add port X vf Y ethaddr:(X = port number,"
+	"Y = VF number)add MAC address filtering for a VF on port X",
+	.tokens = {
+		(void *)&cmd_vf_mac_addr_cmd,
+		(void *)&cmd_vf_mac_addr_what,
+		(void *)&cmd_vf_mac_addr_port,
+		(void *)&cmd_vf_mac_addr_portnum,
+		(void *)&cmd_vf_mac_addr_vf,
+		(void *)&cmd_vf_mac_addr_vfnum,
+		(void *)&cmd_vf_mac_addr_addr,
+		NULL,
+	},
+};
+
+/* *** ADD/REMOVE A VLAN IDENTIFIER TO/FROM A PORT VLAN RX FILTER *** */
+/* Parsed result for "rx_vlan add|rm <vlan> port <port> vf <mask>". */
+struct cmd_vf_rx_vlan_filter {
+	cmdline_fixed_string_t rx_vlan;
+	cmdline_fixed_string_t what;	/* "add" or "rm" */
+	uint16_t vlan_id;
+	cmdline_fixed_string_t port;
+	uint8_t port_id;
+	cmdline_fixed_string_t vf;
+	uint64_t vf_mask;	/* bitmask of VFs the rule applies to */
+};
+
+/*
+ * Handler for "rx_vlan add|rm <vlan_id> port <port_id> vf <vf_mask>":
+ * add or remove a VLAN id from the RX filter of the selected VFs.
+ */
+static void
+cmd_vf_rx_vlan_filter_parsed(void *parsed_result,
+			  __attribute__((unused)) struct cmdline *cl,
+			  __attribute__((unused)) void *data)
+{
+	struct cmd_vf_rx_vlan_filter *res = parsed_result;
+	int add = (strcmp(res->what, "add") == 0);
+
+	set_vf_rx_vlan(res->port_id, res->vlan_id, res->vf_mask, add);
+}
+
+/* Token list for "rx_vlan add|rm <vlan> port <port> vf <mask>". */
+cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_rx_vlan =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter,
+				 rx_vlan, "rx_vlan");
+cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter,
+				 what, "add#rm");
+cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_vlanid =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_rx_vlan_filter,
+			      vlan_id, UINT16);
+cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter,
+				 port, "port");
+cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_rx_vlan_filter,
+			      port_id, UINT8);
+cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_vf =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter,
+				 vf, "vf");
+cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_vf_mask =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_rx_vlan_filter,
+			      vf_mask, UINT64);
+
+/* Command registration for the per-VF RX VLAN filter command. */
+cmdline_parse_inst_t cmd_vf_rxvlan_filter = {
+	.f = cmd_vf_rx_vlan_filter_parsed,
+	.data = NULL,
+	.help_str = "rx_vlan add|rm X port Y vf Z (X = VLAN ID,"
+		"Y = port number,Z = hexadecimal VF mask)",
+	.tokens = {
+		(void *)&cmd_vf_rx_vlan_filter_rx_vlan,
+		(void *)&cmd_vf_rx_vlan_filter_what,
+		(void *)&cmd_vf_rx_vlan_filter_vlanid,
+		(void *)&cmd_vf_rx_vlan_filter_port,
+		(void *)&cmd_vf_rx_vlan_filter_portid,
+		(void *)&cmd_vf_rx_vlan_filter_vf,
+		(void *)&cmd_vf_rx_vlan_filter_vf_mask,
+		NULL,
+	},
+};
+
+/* *** SET RATE LIMIT FOR A QUEUE OF A PORT *** */
+/* Parsed result for "set port <port> queue <queue> rate <rate>". */
+struct cmd_queue_rate_limit_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint8_t port_num;
+	cmdline_fixed_string_t queue;
+	uint8_t queue_num;
+	cmdline_fixed_string_t rate;
+	uint16_t rate_num;	/* rate limit value */
+};
+
+/*
+ * Handler for "set port <port> queue <queue> rate <rate>": apply a rate
+ * limit to one TX queue of a port.
+ */
+static void cmd_queue_rate_limit_parsed(void *parsed_result,
+		__attribute__((unused)) struct cmdline *cl,
+		__attribute__((unused)) void *data)
+{
+	struct cmd_queue_rate_limit_result *res = parsed_result;
+	int diag = 0;
+
+	if (strcmp(res->set, "set") == 0 &&
+	    strcmp(res->port, "port") == 0 &&
+	    strcmp(res->queue, "queue") == 0 &&
+	    strcmp(res->rate, "rate") == 0)
+		diag = set_queue_rate_limit(res->port_num, res->queue_num,
+					    res->rate_num);
+	if (diag < 0)
+		printf("queue_rate_limit_cmd error: (%s)\n", strerror(-diag));
+}
+
+/* Token list for "set port <port> queue <queue> rate <rate>". */
+cmdline_parse_token_string_t cmd_queue_rate_limit_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result,
+				set, "set");
+cmdline_parse_token_string_t cmd_queue_rate_limit_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result,
+				port, "port");
+cmdline_parse_token_num_t cmd_queue_rate_limit_portnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result,
+				port_num, UINT8);
+cmdline_parse_token_string_t cmd_queue_rate_limit_queue =
+	TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result,
+				queue, "queue");
+cmdline_parse_token_num_t cmd_queue_rate_limit_queuenum =
+	TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result,
+				queue_num, UINT8);
+cmdline_parse_token_string_t cmd_queue_rate_limit_rate =
+	TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result,
+				rate, "rate");
+cmdline_parse_token_num_t cmd_queue_rate_limit_ratenum =
+	TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result,
+				rate_num, UINT16);
+
+/* Command registration for per-queue rate limiting. */
+cmdline_parse_inst_t cmd_queue_rate_limit = {
+	.f = cmd_queue_rate_limit_parsed,
+	.data = (void *)0,
+	.help_str = "set port X queue Y rate Z:(X = port number,"
+	"Y = queue number,Z = rate number)set rate limit for a queue on port X",
+	.tokens = {
+		(void *)&cmd_queue_rate_limit_set,
+		(void *)&cmd_queue_rate_limit_port,
+		(void *)&cmd_queue_rate_limit_portnum,
+		(void *)&cmd_queue_rate_limit_queue,
+		(void *)&cmd_queue_rate_limit_queuenum,
+		(void *)&cmd_queue_rate_limit_rate,
+		(void *)&cmd_queue_rate_limit_ratenum,
+		NULL,
+	},
+};
+
+/* *** SET RATE LIMIT FOR A VF OF A PORT *** */
+/* Parsed result for "set port <port> vf <vf> rate <rate> queue_mask <m>". */
+struct cmd_vf_rate_limit_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint8_t port_num;
+	cmdline_fixed_string_t vf;
+	uint8_t vf_num;
+	cmdline_fixed_string_t rate;
+	uint16_t rate_num;	/* rate limit value */
+	cmdline_fixed_string_t q_msk;
+	uint64_t q_msk_val;	/* bitmask of the VF queues to limit */
+};
+
+/*
+ * Handler for "set port <port> vf <vf> rate <rate> queue_mask <mask>":
+ * apply a rate limit to the masked queues of one VF of a port.
+ */
+static void cmd_vf_rate_limit_parsed(void *parsed_result,
+		__attribute__((unused)) struct cmdline *cl,
+		__attribute__((unused)) void *data)
+{
+	struct cmd_vf_rate_limit_result *res = parsed_result;
+	int diag = 0;
+
+	if (strcmp(res->set, "set") == 0 &&
+	    strcmp(res->port, "port") == 0 &&
+	    strcmp(res->vf, "vf") == 0 &&
+	    strcmp(res->rate, "rate") == 0 &&
+	    strcmp(res->q_msk, "queue_mask") == 0)
+		diag = set_vf_rate_limit(res->port_num, res->vf_num,
+					 res->rate_num, res->q_msk_val);
+	if (diag < 0)
+		printf("vf_rate_limit_cmd error: (%s)\n", strerror(-diag));
+}
+
+/* Token list for "set port <port> vf <vf> rate <rate> queue_mask <mask>". */
+cmdline_parse_token_string_t cmd_vf_rate_limit_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result,
+				set, "set");
+cmdline_parse_token_string_t cmd_vf_rate_limit_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result,
+				port, "port");
+cmdline_parse_token_num_t cmd_vf_rate_limit_portnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result,
+				port_num, UINT8);
+cmdline_parse_token_string_t cmd_vf_rate_limit_vf =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result,
+				vf, "vf");
+cmdline_parse_token_num_t cmd_vf_rate_limit_vfnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result,
+				vf_num, UINT8);
+cmdline_parse_token_string_t cmd_vf_rate_limit_rate =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result,
+				rate, "rate");
+cmdline_parse_token_num_t cmd_vf_rate_limit_ratenum =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result,
+				rate_num, UINT16);
+cmdline_parse_token_string_t cmd_vf_rate_limit_q_msk =
+	TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result,
+				q_msk, "queue_mask");
+cmdline_parse_token_num_t cmd_vf_rate_limit_q_msk_val =
+	TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result,
+				q_msk_val, UINT64);
+
+/* Command registration for per-VF rate limiting. */
+cmdline_parse_inst_t cmd_vf_rate_limit = {
+	.f = cmd_vf_rate_limit_parsed,
+	.data = (void *)0,
+	.help_str = "set port X vf Y rate Z queue_mask V:(X = port number,"
+	"Y = VF number,Z = rate number, V = queue mask value)set rate limit "
+	"for queues of VF on port X",
+	.tokens = {
+		(void *)&cmd_vf_rate_limit_set,
+		(void *)&cmd_vf_rate_limit_port,
+		(void *)&cmd_vf_rate_limit_portnum,
+		(void *)&cmd_vf_rate_limit_vf,
+		(void *)&cmd_vf_rate_limit_vfnum,
+		(void *)&cmd_vf_rate_limit_rate,
+		(void *)&cmd_vf_rate_limit_ratenum,
+		(void *)&cmd_vf_rate_limit_q_msk,
+		(void *)&cmd_vf_rate_limit_q_msk_val,
+		NULL,
+	},
+};
+
+/* *** ADD TUNNEL FILTER OF A PORT *** */
+/* Parsed result for the "tunnel_filter add|rm ..." command. */
+struct cmd_tunnel_filter_result {
+	cmdline_fixed_string_t cmd;
+	cmdline_fixed_string_t what;	/* "add" or "rm" */
+	uint8_t port_id;
+	struct ether_addr outer_mac;
+	struct ether_addr inner_mac;
+	cmdline_ipaddr_t ip_value;	/* outer IPv4 or IPv6 address */
+	uint16_t inner_vlan;
+	cmdline_fixed_string_t tunnel_type;	/* vxlan/nvgre/ipingre */
+	cmdline_fixed_string_t filter_type;
+	uint32_t tenant_id;
+	uint16_t queue_num;	/* destination RX queue */
+};
+
+/*
+ * Handler for "tunnel_filter add|rm ...": build an rte_eth_tunnel_filter_conf
+ * from the parsed tokens and add or delete the tunnel filter on the port.
+ */
+static void
+cmd_tunnel_filter_parsed(void *parsed_result,
+			  __attribute__((unused)) struct cmdline *cl,
+			  __attribute__((unused)) void *data)
+{
+	struct cmd_tunnel_filter_result *res = parsed_result;
+	struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
+	int ret = 0;
+
+	memset(&tunnel_filter_conf, 0, sizeof(tunnel_filter_conf));
+
+	ether_addr_copy(&res->outer_mac, &tunnel_filter_conf.outer_mac);
+	ether_addr_copy(&res->inner_mac, &tunnel_filter_conf.inner_mac);
+	tunnel_filter_conf.inner_vlan = res->inner_vlan;
+
+	/* The cmdline IP token carries either an IPv4 or an IPv6 address. */
+	if (res->ip_value.family == AF_INET) {
+		tunnel_filter_conf.ip_addr.ipv4_addr =
+			res->ip_value.addr.ipv4.s_addr;
+		tunnel_filter_conf.ip_type = RTE_TUNNEL_IPTYPE_IPV4;
+	} else {
+		memcpy(&(tunnel_filter_conf.ip_addr.ipv6_addr),
+			&(res->ip_value.addr.ipv6),
+			sizeof(struct in6_addr));
+		tunnel_filter_conf.ip_type = RTE_TUNNEL_IPTYPE_IPV6;
+	}
+
+	if (!strcmp(res->filter_type, "imac-ivlan"))
+		tunnel_filter_conf.filter_type = RTE_TUNNEL_FILTER_IMAC_IVLAN;
+	else if (!strcmp(res->filter_type, "imac-ivlan-tenid"))
+		tunnel_filter_conf.filter_type =
+			RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+	else if (!strcmp(res->filter_type, "imac-tenid"))
+		tunnel_filter_conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID;
+	else if (!strcmp(res->filter_type, "imac"))
+		tunnel_filter_conf.filter_type = ETH_TUNNEL_FILTER_IMAC;
+	else if (!strcmp(res->filter_type, "omac-imac-tenid"))
+		tunnel_filter_conf.filter_type =
+			RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+	else if (!strcmp(res->filter_type, "oip"))
+		tunnel_filter_conf.filter_type = ETH_TUNNEL_FILTER_OIP;
+	else if (!strcmp(res->filter_type, "iip"))
+		tunnel_filter_conf.filter_type = ETH_TUNNEL_FILTER_IIP;
+	else {
+		/* Fix: message was missing its terminating newline. */
+		printf("The filter type is not supported.\n");
+		return;
+	}
+
+	if (!strcmp(res->tunnel_type, "vxlan"))
+		tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+	else if (!strcmp(res->tunnel_type, "nvgre"))
+		tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_NVGRE;
+	else if (!strcmp(res->tunnel_type, "ipingre"))
+		tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_IP_IN_GRE;
+	else {
+		printf("The tunnel type %s not supported.\n", res->tunnel_type);
+		return;
+	}
+
+	tunnel_filter_conf.tenant_id = res->tenant_id;
+	tunnel_filter_conf.queue_id = res->queue_num;
+	if (!strcmp(res->what, "add"))
+		ret = rte_eth_dev_filter_ctrl(res->port_id,
+					      RTE_ETH_FILTER_TUNNEL,
+					      RTE_ETH_FILTER_ADD,
+					      &tunnel_filter_conf);
+	else
+		ret = rte_eth_dev_filter_ctrl(res->port_id,
+					      RTE_ETH_FILTER_TUNNEL,
+					      RTE_ETH_FILTER_DELETE,
+					      &tunnel_filter_conf);
+	if (ret < 0)
+		printf("cmd_tunnel_filter_parsed error: (%s)\n",
+		       strerror(-ret));
+
+}
+/* Token list for the "tunnel_filter add|rm ..." command. */
+cmdline_parse_token_string_t cmd_tunnel_filter_cmd =
+	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result,
+	cmd, "tunnel_filter");
+cmdline_parse_token_string_t cmd_tunnel_filter_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result,
+	what, "add#rm");
+cmdline_parse_token_num_t cmd_tunnel_filter_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result,
+	port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_tunnel_filter_outer_mac =
+	TOKEN_ETHERADDR_INITIALIZER(struct cmd_tunnel_filter_result,
+	outer_mac);
+cmdline_parse_token_etheraddr_t cmd_tunnel_filter_inner_mac =
+	TOKEN_ETHERADDR_INITIALIZER(struct cmd_tunnel_filter_result,
+	inner_mac);
+cmdline_parse_token_num_t cmd_tunnel_filter_innner_vlan =
+	TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result,
+	inner_vlan, UINT16);
+cmdline_parse_token_ipaddr_t cmd_tunnel_filter_ip_value =
+	TOKEN_IPADDR_INITIALIZER(struct cmd_tunnel_filter_result,
+	ip_value);
+cmdline_parse_token_string_t cmd_tunnel_filter_tunnel_type =
+	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result,
+	tunnel_type, "vxlan#nvgre#ipingre");
+
+cmdline_parse_token_string_t cmd_tunnel_filter_filter_type =
+	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result,
+	filter_type, "oip#iip#imac-ivlan#imac-ivlan-tenid#imac-tenid#"
+		"imac#omac-imac-tenid");
+cmdline_parse_token_num_t cmd_tunnel_filter_tenant_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result,
+	tenant_id, UINT32);
+cmdline_parse_token_num_t cmd_tunnel_filter_queue_num =
+	TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result,
+	queue_num, UINT16);
+
+/* Command registration for the tunnel filter command. */
+cmdline_parse_inst_t cmd_tunnel_filter = {
+	.f = cmd_tunnel_filter_parsed,
+	.data = (void *)0,
+	.help_str = "add/rm tunnel filter of a port: "
+		"tunnel_filter add port_id outer_mac inner_mac ip "
+		"inner_vlan tunnel_type(vxlan|nvgre|ipingre) filter_type "
+		"(oip|iip|imac-ivlan|imac-ivlan-tenid|imac-tenid|"
+		"imac|omac-imac-tenid) "
+		"tenant_id queue_num",
+	.tokens = {
+		(void *)&cmd_tunnel_filter_cmd,
+		(void *)&cmd_tunnel_filter_what,
+		(void *)&cmd_tunnel_filter_port_id,
+		(void *)&cmd_tunnel_filter_outer_mac,
+		(void *)&cmd_tunnel_filter_inner_mac,
+		(void *)&cmd_tunnel_filter_ip_value,
+		(void *)&cmd_tunnel_filter_innner_vlan,
+		(void *)&cmd_tunnel_filter_tunnel_type,
+		(void *)&cmd_tunnel_filter_filter_type,
+		(void *)&cmd_tunnel_filter_tenant_id,
+		(void *)&cmd_tunnel_filter_queue_num,
+		NULL,
+	},
+};
+
+/* *** CONFIGURE TUNNEL UDP PORT *** */
+/* Parsed result for "rx_vxlan_port add|rm <udp_port> <port_id>". */
+struct cmd_tunnel_udp_config {
+	cmdline_fixed_string_t cmd;	/* literal "rx_vxlan_port" */
+	cmdline_fixed_string_t what;	/* "add" or "rm" */
+	uint16_t udp_port;	/* UDP port recognized as VXLAN */
+	uint8_t port_id;
+};
+
+/*
+ * Handler for "rx_vxlan_port add|rm <udp_port> <port_id>": register or
+ * unregister a UDP port as a VXLAN tunnel port on the device.
+ */
+static void
+cmd_tunnel_udp_config_parsed(void *parsed_result,
+			  __attribute__((unused)) struct cmdline *cl,
+			  __attribute__((unused)) void *data)
+{
+	struct cmd_tunnel_udp_config *res = parsed_result;
+	struct rte_eth_udp_tunnel tunnel_udp;
+	int ret;
+
+	/* Zero-fill so prot_type can never be read uninitialized, even if
+	 * the command token ever stops matching "rx_vxlan_port". */
+	memset(&tunnel_udp, 0, sizeof(tunnel_udp));
+	tunnel_udp.udp_port = res->udp_port;
+
+	if (!strcmp(res->cmd, "rx_vxlan_port"))
+		tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+
+	if (!strcmp(res->what, "add"))
+		ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
+						      &tunnel_udp);
+	else
+		ret = rte_eth_dev_udp_tunnel_port_delete(res->port_id,
+							 &tunnel_udp);
+
+	if (ret < 0)
+		/* Fix: the old message always said "add error" even when the
+		 * "rm" branch failed; report the requested operation. */
+		printf("udp tunneling %s error: (%s)\n", res->what,
+		       strerror(-ret));
+}
+
+/* Token list for "rx_vxlan_port add|rm <udp_port> <port_id>". */
+cmdline_parse_token_string_t cmd_tunnel_udp_config_cmd =
+	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config,
+				cmd, "rx_vxlan_port");
+cmdline_parse_token_string_t cmd_tunnel_udp_config_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config,
+				what, "add#rm");
+cmdline_parse_token_num_t cmd_tunnel_udp_config_udp_port =
+	TOKEN_NUM_INITIALIZER(struct cmd_tunnel_udp_config,
+				udp_port, UINT16);
+cmdline_parse_token_num_t cmd_tunnel_udp_config_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_tunnel_udp_config,
+				port_id, UINT8);
+
+/* Command registration for VXLAN UDP port configuration. */
+cmdline_parse_inst_t cmd_tunnel_udp_config = {
+	.f = cmd_tunnel_udp_config_parsed,
+	.data = (void *)0,
+	.help_str = "add/rm an tunneling UDP port filter: "
+		"rx_vxlan_port add udp_port port_id",
+	.tokens = {
+		(void *)&cmd_tunnel_udp_config_cmd,
+		(void *)&cmd_tunnel_udp_config_what,
+		(void *)&cmd_tunnel_udp_config_udp_port,
+		(void *)&cmd_tunnel_udp_config_port_id,
+		NULL,
+	},
+};
+
+/* *** GLOBAL CONFIG *** */
+/* Parsed result for "global_config <port_id> gre-key-len <len>". */
+struct cmd_global_config_result {
+	cmdline_fixed_string_t cmd;
+	uint8_t port_id;
+	cmdline_fixed_string_t cfg_type;	/* literal "gre-key-len" */
+	uint8_t len;	/* GRE key length in bytes */
+};
+
+/*
+ * Handler for "global_config <port_id> gre-key-len <len>": set the GRE key
+ * length used by the device's filtering logic.
+ */
+static void
+cmd_global_config_parsed(void *parsed_result,
+			 __attribute__((unused)) struct cmdline *cl,
+			 __attribute__((unused)) void *data)
+{
+	struct cmd_global_config_result *res = parsed_result;
+	struct rte_eth_global_cfg cfg;
+	int status;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN;
+	cfg.cfg.gre_key_len = res->len;
+	status = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_NONE,
+					 RTE_ETH_FILTER_SET, &cfg);
+	if (status != 0)
+		printf("Global config error\n");
+}
+
+/* Token list for "global_config <port_id> gre-key-len <number>". */
+cmdline_parse_token_string_t cmd_global_config_cmd =
+	TOKEN_STRING_INITIALIZER(struct cmd_global_config_result, cmd,
+		"global_config");
+cmdline_parse_token_num_t cmd_global_config_port_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_global_config_result, port_id, UINT8);
+cmdline_parse_token_string_t cmd_global_config_type =
+	TOKEN_STRING_INITIALIZER(struct cmd_global_config_result,
+		cfg_type, "gre-key-len");
+cmdline_parse_token_num_t cmd_global_config_gre_key_len =
+	TOKEN_NUM_INITIALIZER(struct cmd_global_config_result,
+		len, UINT8);
+
+/* Command registration for the global-config command. */
+cmdline_parse_inst_t cmd_global_config = {
+	.f = cmd_global_config_parsed,
+	.data = (void *)NULL,
+	.help_str = "global_config <port_id> gre-key-len <number>",
+	.tokens = {
+		(void *)&cmd_global_config_cmd,
+		(void *)&cmd_global_config_port_id,
+		(void *)&cmd_global_config_type,
+		(void *)&cmd_global_config_gre_key_len,
+		NULL,
+	},
+};
+
+/* *** CONFIGURE VM MIRROR VLAN/POOL RULE *** */
+/* Parsed result for "set port <id> mirror-rule <rule> <what> <value>
+ * dst-pool <pool> on|off". */
+struct cmd_set_mirror_mask_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint8_t port_id;
+	cmdline_fixed_string_t mirror;
+	uint8_t rule_id;
+	cmdline_fixed_string_t what;	/* pool-mirror-up/-down or vlan-mirror */
+	cmdline_fixed_string_t value;	/* hex pool mask or VLAN id list */
+	cmdline_fixed_string_t dstpool;
+	uint8_t dstpool_id;	/* pool mirrored traffic is sent to */
+	cmdline_fixed_string_t on;	/* "on" or "off" */
+};
+
+/* Token list for the VLAN/pool mirror rule command. */
+cmdline_parse_token_string_t cmd_mirror_mask_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
+				set, "set");
+cmdline_parse_token_string_t cmd_mirror_mask_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
+				port, "port");
+cmdline_parse_token_num_t cmd_mirror_mask_portid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result,
+				port_id, UINT8);
+cmdline_parse_token_string_t cmd_mirror_mask_mirror =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
+				mirror, "mirror-rule");
+cmdline_parse_token_num_t cmd_mirror_mask_ruleid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result,
+				rule_id, UINT8);
+cmdline_parse_token_string_t cmd_mirror_mask_what =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
+				what, "pool-mirror-up#pool-mirror-down"
+				      "#vlan-mirror");
+/* Free-form value token: a hex pool mask or a comma-separated VLAN list. */
+cmdline_parse_token_string_t cmd_mirror_mask_value =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
+				value, NULL);
+cmdline_parse_token_string_t cmd_mirror_mask_dstpool =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
+				dstpool, "dst-pool");
+cmdline_parse_token_num_t cmd_mirror_mask_poolid =
+	TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result,
+				dstpool_id, UINT8);
+cmdline_parse_token_string_t cmd_mirror_mask_on =
+	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
+				on, "on#off");
+
+/*
+ * Handler for "set port <id> mirror-rule <rule> pool-mirror-up|
+ * pool-mirror-down|vlan-mirror <value> dst-pool <pool> on|off": build a
+ * mirror configuration and install or remove the rule on the port.
+ */
+static void
+cmd_set_mirror_mask_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_set_mirror_mask_result *res = parsed_result;
+	struct rte_eth_mirror_conf mr_conf;
+	unsigned int vlan_list[ETH_MIRROR_MAX_VLANS];
+	int nb_item, i, diag;
+	uint8_t enable;
+
+	memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf));
+	mr_conf.dst_pool = res->dstpool_id;
+
+	if (strcmp(res->what, "pool-mirror-up") == 0) {
+		/* value is a hexadecimal pool mask */
+		mr_conf.pool_mask = strtoull(res->value, NULL, 16);
+		mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
+	} else if (strcmp(res->what, "pool-mirror-down") == 0) {
+		mr_conf.pool_mask = strtoull(res->value, NULL, 16);
+		mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_DOWN;
+	} else if (strcmp(res->what, "vlan-mirror") == 0) {
+		/* value is a comma-separated VLAN id list */
+		mr_conf.rule_type = ETH_MIRROR_VLAN;
+		nb_item = parse_item_list(res->value, "vlan",
+					  ETH_MIRROR_MAX_VLANS, vlan_list, 1);
+		if (nb_item <= 0)
+			return;
+		for (i = 0; i < nb_item; i++) {
+			if (vlan_list[i] > ETHER_MAX_VLAN_ID) {
+				printf("Invalid vlan_id: must be < 4096\n");
+				return;
+			}
+			mr_conf.vlan.vlan_id[i] = (uint16_t)vlan_list[i];
+			mr_conf.vlan.vlan_mask |= 1ULL << i;
+		}
+	}
+
+	enable = (uint8_t)(strcmp(res->on, "on") == 0);
+	diag = rte_eth_mirror_rule_set(res->port_id, &mr_conf,
+				       res->rule_id, enable);
+	if (diag < 0)
+		printf("mirror rule add error: (%s)\n", strerror(-diag));
+}
+
/* Registration of the pool/vlan mirror-rule configuration command. */
cmdline_parse_inst_t cmd_set_mirror_mask = {
	.f = cmd_set_mirror_mask_parsed, /* callback */
	.data = NULL, /* no extra callback argument */
	.help_str = "set port X mirror-rule Y pool-mirror-up|pool-mirror-down|vlan-mirror"
		" pool_mask|vlan_id[,vlan_id]* dst-pool Z on|off",
	.tokens = {
		(void *)&cmd_mirror_mask_set,
		(void *)&cmd_mirror_mask_port,
		(void *)&cmd_mirror_mask_portid,
		(void *)&cmd_mirror_mask_mirror,
		(void *)&cmd_mirror_mask_ruleid,
		(void *)&cmd_mirror_mask_what,
		(void *)&cmd_mirror_mask_value,
		(void *)&cmd_mirror_mask_dstpool,
		(void *)&cmd_mirror_mask_poolid,
		(void *)&cmd_mirror_mask_on,
		NULL,
	},
};
+
/* *** CONFIGURE VM MIRROR UPLINK/DOWNLINK RULE *** */
/* Parsed result for "set port <X> mirror-rule <Y>
 * uplink-mirror|downlink-mirror dst-pool <Z> on|off". */
struct cmd_set_mirror_link_result {
	cmdline_fixed_string_t set;	/* literal "set" */
	cmdline_fixed_string_t port;	/* literal "port" */
	uint8_t port_id;		/* target port */
	cmdline_fixed_string_t mirror;	/* literal "mirror-rule" */
	uint8_t rule_id;		/* mirror rule slot */
	cmdline_fixed_string_t what;	/* "uplink-mirror" or "downlink-mirror" */
	cmdline_fixed_string_t dstpool;	/* literal "dst-pool" */
	uint8_t dstpool_id;		/* destination pool */
	cmdline_fixed_string_t on;	/* "on" or "off" */
};
+
/* Parser tokens for the uplink/downlink mirror-rule command. */
cmdline_parse_token_string_t cmd_mirror_link_set =
	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
				 set, "set");
cmdline_parse_token_string_t cmd_mirror_link_port =
	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
				port, "port");
cmdline_parse_token_num_t cmd_mirror_link_portid =
	TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result,
				port_id, UINT8);
cmdline_parse_token_string_t cmd_mirror_link_mirror =
	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
				mirror, "mirror-rule");
cmdline_parse_token_num_t cmd_mirror_link_ruleid =
	TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result,
			    rule_id, UINT8);
cmdline_parse_token_string_t cmd_mirror_link_what =
	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
				what, "uplink-mirror#downlink-mirror");
cmdline_parse_token_string_t cmd_mirror_link_dstpool =
	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
				dstpool, "dst-pool");
cmdline_parse_token_num_t cmd_mirror_link_poolid =
	TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result,
				dstpool_id, UINT8);
cmdline_parse_token_string_t cmd_mirror_link_on =
	TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
				on, "on#off");
+
+static void
+cmd_set_mirror_link_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int ret;
+ struct cmd_set_mirror_link_result *res = parsed_result;
+ struct rte_eth_mirror_conf mr_conf;
+
+ memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf));
+ if (!strcmp(res->what, "uplink-mirror"))
+ mr_conf.rule_type = ETH_MIRROR_UPLINK_PORT;
+ else
+ mr_conf.rule_type = ETH_MIRROR_DOWNLINK_PORT;
+
+ mr_conf.dst_pool = res->dstpool_id;
+
+ if (!strcmp(res->on, "on"))
+ ret = rte_eth_mirror_rule_set(res->port_id, &mr_conf,
+ res->rule_id, 1);
+ else
+ ret = rte_eth_mirror_rule_set(res->port_id, &mr_conf,
+ res->rule_id, 0);
+
+ /* check the return value and print it if is < 0 */
+ if (ret < 0)
+ printf("mirror rule add error: (%s)\n", strerror(-ret));
+
+}
+
/* Registration of the uplink/downlink mirror-rule configuration command. */
cmdline_parse_inst_t cmd_set_mirror_link = {
	.f = cmd_set_mirror_link_parsed, /* callback */
	.data = NULL, /* no extra callback argument */
	.help_str = "set port X mirror-rule Y uplink-mirror|"
		"downlink-mirror dst-pool Z on|off",
	.tokens = {
		(void *)&cmd_mirror_link_set,
		(void *)&cmd_mirror_link_port,
		(void *)&cmd_mirror_link_portid,
		(void *)&cmd_mirror_link_mirror,
		(void *)&cmd_mirror_link_ruleid,
		(void *)&cmd_mirror_link_what,
		(void *)&cmd_mirror_link_dstpool,
		(void *)&cmd_mirror_link_poolid,
		(void *)&cmd_mirror_link_on,
		NULL,
	},
};
+
/* *** RESET VM MIRROR RULE *** */
/* Parsed result for "reset port <X> mirror-rule <Y>". */
struct cmd_rm_mirror_rule_result {
	cmdline_fixed_string_t reset;	/* literal "reset" */
	cmdline_fixed_string_t port;	/* literal "port" */
	uint8_t port_id;		/* target port */
	cmdline_fixed_string_t mirror;	/* literal "mirror-rule" */
	uint8_t rule_id;		/* mirror rule slot to clear */
};
+
/* Parser tokens for the mirror-rule reset command. */
cmdline_parse_token_string_t cmd_rm_mirror_rule_reset =
	TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result,
				 reset, "reset");
cmdline_parse_token_string_t cmd_rm_mirror_rule_port =
	TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result,
				port, "port");
cmdline_parse_token_num_t cmd_rm_mirror_rule_portid =
	TOKEN_NUM_INITIALIZER(struct cmd_rm_mirror_rule_result,
				port_id, UINT8);
cmdline_parse_token_string_t cmd_rm_mirror_rule_mirror =
	TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result,
				mirror, "mirror-rule");
cmdline_parse_token_num_t cmd_rm_mirror_rule_ruleid =
	TOKEN_NUM_INITIALIZER(struct cmd_rm_mirror_rule_result,
				rule_id, UINT8);
+
+static void
+cmd_reset_mirror_rule_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int ret;
+ struct cmd_set_mirror_link_result *res = parsed_result;
+ /* check rule_id */
+ ret = rte_eth_mirror_rule_reset(res->port_id,res->rule_id);
+ if(ret < 0)
+ printf("mirror rule remove error: (%s)\n", strerror(-ret));
+}
+
/* Registration of the mirror-rule reset command. */
cmdline_parse_inst_t cmd_reset_mirror_rule = {
	.f = cmd_reset_mirror_rule_parsed, /* callback */
	.data = NULL, /* no extra callback argument */
	.help_str = "reset port X mirror-rule Y",
	.tokens = {
		(void *)&cmd_rm_mirror_rule_reset,
		(void *)&cmd_rm_mirror_rule_port,
		(void *)&cmd_rm_mirror_rule_portid,
		(void *)&cmd_rm_mirror_rule_mirror,
		(void *)&cmd_rm_mirror_rule_ruleid,
		NULL,
	},
};
+
+/* ******************************************************************************** */
+
/* Parsed result for the one-token "dump_*" diagnostic commands. */
struct cmd_dump_result {
	cmdline_fixed_string_t dump;	/* which report to dump */
};
+
+static void
+dump_struct_sizes(void)
+{
+#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
+ DUMP_SIZE(struct rte_mbuf);
+ DUMP_SIZE(struct rte_mempool);
+ DUMP_SIZE(struct rte_ring);
+#undef DUMP_SIZE
+}
+
+static void cmd_dump_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_dump_result *res = parsed_result;
+
+ if (!strcmp(res->dump, "dump_physmem"))
+ rte_dump_physmem_layout(stdout);
+ else if (!strcmp(res->dump, "dump_memzone"))
+ rte_memzone_dump(stdout);
+ else if (!strcmp(res->dump, "dump_log_history"))
+ rte_log_dump_history(stdout);
+ else if (!strcmp(res->dump, "dump_struct_sizes"))
+ dump_struct_sizes();
+ else if (!strcmp(res->dump, "dump_ring"))
+ rte_ring_list_dump(stdout);
+ else if (!strcmp(res->dump, "dump_mempool"))
+ rte_mempool_list_dump(stdout);
+ else if (!strcmp(res->dump, "dump_devargs"))
+ rte_eal_devargs_dump(stdout);
+}
+
/* Accepted "dump_*" keywords ('#' separates the alternatives). */
cmdline_parse_token_string_t cmd_dump_dump =
	TOKEN_STRING_INITIALIZER(struct cmd_dump_result, dump,
		"dump_physmem#"
		"dump_memzone#"
		"dump_log_history#"
		"dump_struct_sizes#"
		"dump_ring#"
		"dump_mempool#"
		"dump_devargs");

/* Registration of the single-token dump commands. */
cmdline_parse_inst_t cmd_dump = {
	.f = cmd_dump_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "dump status",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_dump_dump,
		NULL,
	},
};
+
+/* ******************************************************************************** */
+
/* Parsed result for "dump_ring|dump_mempool <name>". */
struct cmd_dump_one_result {
	cmdline_fixed_string_t dump;	/* "dump_ring" or "dump_mempool" */
	cmdline_fixed_string_t name;	/* object name to look up */
};
+
+static void cmd_dump_one_parsed(void *parsed_result, struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_dump_one_result *res = parsed_result;
+
+ if (!strcmp(res->dump, "dump_ring")) {
+ struct rte_ring *r;
+ r = rte_ring_lookup(res->name);
+ if (r == NULL) {
+ cmdline_printf(cl, "Cannot find ring\n");
+ return;
+ }
+ rte_ring_dump(stdout, r);
+ } else if (!strcmp(res->dump, "dump_mempool")) {
+ struct rte_mempool *mp;
+ mp = rte_mempool_lookup(res->name);
+ if (mp == NULL) {
+ cmdline_printf(cl, "Cannot find mempool\n");
+ return;
+ }
+ rte_mempool_dump(stdout, mp);
+ }
+}
+
/* Parser tokens and registration for the two-token dump commands. */
cmdline_parse_token_string_t cmd_dump_one_dump =
	TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, dump,
				 "dump_ring#dump_mempool");

/* free-form object name, resolved at run time via the lookup functions */
cmdline_parse_token_string_t cmd_dump_one_name =
	TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, name, NULL);

cmdline_parse_inst_t cmd_dump_one = {
	.f = cmd_dump_one_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "dump one ring/mempool: dump_ring|dump_mempool <name>",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_dump_one_dump,
		(void *)&cmd_dump_one_name,
		NULL,
	},
};
+
+/* *** Add/Del syn filter *** */
/* Parsed result for "syn_filter <port> add|del priority high|low queue <q>". */
struct cmd_syn_filter_result {
	cmdline_fixed_string_t filter;	/* literal "syn_filter" */
	uint8_t port_id;		/* target port */
	cmdline_fixed_string_t ops;	/* "add" or "del" */
	cmdline_fixed_string_t priority;/* literal "priority" */
	cmdline_fixed_string_t high;	/* "high" or "low" */
	cmdline_fixed_string_t queue;	/* literal "queue" */
	uint16_t queue_id;		/* destination RX queue */
};
+
+static void
+cmd_syn_filter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_syn_filter_result *res = parsed_result;
+ struct rte_eth_syn_filter syn_filter;
+ int ret = 0;
+
+ ret = rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_SYN);
+ if (ret < 0) {
+ printf("syn filter is not supported on port %u.\n",
+ res->port_id);
+ return;
+ }
+
+ memset(&syn_filter, 0, sizeof(syn_filter));
+
+ if (!strcmp(res->ops, "add")) {
+ if (!strcmp(res->high, "high"))
+ syn_filter.hig_pri = 1;
+ else
+ syn_filter.hig_pri = 0;
+
+ syn_filter.queue = res->queue_id;
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_SYN,
+ RTE_ETH_FILTER_ADD,
+ &syn_filter);
+ } else
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_SYN,
+ RTE_ETH_FILTER_DELETE,
+ &syn_filter);
+
+ if (ret < 0)
+ printf("syn filter programming error: (%s)\n",
+ strerror(-ret));
+}
+
/* Parser tokens and registration for the syn_filter command. */
cmdline_parse_token_string_t cmd_syn_filter_filter =
	TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result,
	filter, "syn_filter");
cmdline_parse_token_num_t cmd_syn_filter_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_syn_filter_result,
	port_id, UINT8);
cmdline_parse_token_string_t cmd_syn_filter_ops =
	TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result,
	ops, "add#del");
cmdline_parse_token_string_t cmd_syn_filter_priority =
	TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result,
				priority, "priority");
cmdline_parse_token_string_t cmd_syn_filter_high =
	TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result,
				high, "high#low");
cmdline_parse_token_string_t cmd_syn_filter_queue =
	TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result,
				queue, "queue");
cmdline_parse_token_num_t cmd_syn_filter_queue_id =
	TOKEN_NUM_INITIALIZER(struct cmd_syn_filter_result,
				queue_id, UINT16);

cmdline_parse_inst_t cmd_syn_filter = {
	.f = cmd_syn_filter_parsed,
	.data = NULL,
	.help_str = "add/delete syn filter",
	.tokens = {
		(void *)&cmd_syn_filter_filter,
		(void *)&cmd_syn_filter_port_id,
		(void *)&cmd_syn_filter_ops,
		(void *)&cmd_syn_filter_priority,
		(void *)&cmd_syn_filter_high,
		(void *)&cmd_syn_filter_queue,
		(void *)&cmd_syn_filter_queue_id,
		NULL,
	},
};
+
/* *** ADD/REMOVE A 2tuple FILTER *** */
/* Parsed result for the 2tuple_filter command (dst-port + protocol match). */
struct cmd_2tuple_filter_result {
	cmdline_fixed_string_t filter;		/* literal "2tuple_filter" */
	uint8_t port_id;			/* target port */
	cmdline_fixed_string_t ops;		/* "add" or "del" */
	cmdline_fixed_string_t dst_port;	/* literal "dst_port" */
	uint16_t dst_port_value;		/* port, host byte order */
	cmdline_fixed_string_t protocol;	/* literal "protocol" */
	uint8_t protocol_value;			/* IP protocol number */
	cmdline_fixed_string_t mask;		/* literal "mask" */
	uint8_t mask_value;	/* bit1: match dst_port, bit0: match protocol */
	cmdline_fixed_string_t tcp_flags;	/* literal "tcp_flags" */
	uint8_t tcp_flags_value;		/* TCP flags to match, 0 = none */
	cmdline_fixed_string_t priority;	/* literal "priority" */
	uint8_t priority_value;			/* filter priority */
	cmdline_fixed_string_t queue;		/* literal "queue" */
	uint16_t queue_id;			/* destination RX queue */
};
+
+static void
+cmd_2tuple_filter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct rte_eth_ntuple_filter filter;
+ struct cmd_2tuple_filter_result *res = parsed_result;
+ int ret = 0;
+
+ ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_NTUPLE);
+ if (ret < 0) {
+ printf("ntuple filter is not supported on port %u.\n",
+ res->port_id);
+ return;
+ }
+
+ memset(&filter, 0, sizeof(struct rte_eth_ntuple_filter));
+
+ filter.flags = RTE_2TUPLE_FLAGS;
+ filter.dst_port_mask = (res->mask_value & 0x02) ? UINT16_MAX : 0;
+ filter.proto_mask = (res->mask_value & 0x01) ? UINT8_MAX : 0;
+ filter.proto = res->protocol_value;
+ filter.priority = res->priority_value;
+ if (res->tcp_flags_value != 0 && filter.proto != IPPROTO_TCP) {
+ printf("nonzero tcp_flags is only meaningful"
+ " when protocol is TCP.\n");
+ return;
+ }
+ if (res->tcp_flags_value > TCP_FLAG_ALL) {
+ printf("invalid TCP flags.\n");
+ return;
+ }
+
+ if (res->tcp_flags_value != 0) {
+ filter.flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ filter.tcp_flags = res->tcp_flags_value;
+ }
+
+ /* need convert to big endian. */
+ filter.dst_port = rte_cpu_to_be_16(res->dst_port_value);
+ filter.queue = res->queue_id;
+
+ if (!strcmp(res->ops, "add"))
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+ else
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+ if (ret < 0)
+ printf("2tuple filter programming error: (%s)\n",
+ strerror(-ret));
+
+}
+
/* Parser tokens and registration for the 2tuple_filter command. */
cmdline_parse_token_string_t cmd_2tuple_filter_filter =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				 filter, "2tuple_filter");
cmdline_parse_token_num_t cmd_2tuple_filter_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
				port_id, UINT8);
cmdline_parse_token_string_t cmd_2tuple_filter_ops =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				 ops, "add#del");
cmdline_parse_token_string_t cmd_2tuple_filter_dst_port =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				dst_port, "dst_port");
cmdline_parse_token_num_t cmd_2tuple_filter_dst_port_value =
	TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
				dst_port_value, UINT16);
cmdline_parse_token_string_t cmd_2tuple_filter_protocol =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				protocol, "protocol");
cmdline_parse_token_num_t cmd_2tuple_filter_protocol_value =
	TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
				protocol_value, UINT8);
cmdline_parse_token_string_t cmd_2tuple_filter_mask =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				mask, "mask");
/* NOTE(review): mask_value is declared uint8_t but parsed as INT8;
 * verify whether mask values above 0x7f are intended to be accepted. */
cmdline_parse_token_num_t cmd_2tuple_filter_mask_value =
	TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
				mask_value, INT8);
cmdline_parse_token_string_t cmd_2tuple_filter_tcp_flags =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				tcp_flags, "tcp_flags");
cmdline_parse_token_num_t cmd_2tuple_filter_tcp_flags_value =
	TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
				tcp_flags_value, UINT8);
cmdline_parse_token_string_t cmd_2tuple_filter_priority =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				priority, "priority");
cmdline_parse_token_num_t cmd_2tuple_filter_priority_value =
	TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
				priority_value, UINT8);
cmdline_parse_token_string_t cmd_2tuple_filter_queue =
	TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
				queue, "queue");
cmdline_parse_token_num_t cmd_2tuple_filter_queue_id =
	TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
				queue_id, UINT16);

cmdline_parse_inst_t cmd_2tuple_filter = {
	.f = cmd_2tuple_filter_parsed,
	.data = NULL,
	.help_str = "add a 2tuple filter",
	.tokens = {
		(void *)&cmd_2tuple_filter_filter,
		(void *)&cmd_2tuple_filter_port_id,
		(void *)&cmd_2tuple_filter_ops,
		(void *)&cmd_2tuple_filter_dst_port,
		(void *)&cmd_2tuple_filter_dst_port_value,
		(void *)&cmd_2tuple_filter_protocol,
		(void *)&cmd_2tuple_filter_protocol_value,
		(void *)&cmd_2tuple_filter_mask,
		(void *)&cmd_2tuple_filter_mask_value,
		(void *)&cmd_2tuple_filter_tcp_flags,
		(void *)&cmd_2tuple_filter_tcp_flags_value,
		(void *)&cmd_2tuple_filter_priority,
		(void *)&cmd_2tuple_filter_priority_value,
		(void *)&cmd_2tuple_filter_queue,
		(void *)&cmd_2tuple_filter_queue_id,
		NULL,
	},
};
+
/* *** ADD/REMOVE A 5tuple FILTER *** */
/* Parsed result for the 5tuple_filter command
 * (dst/src IP, dst/src port and protocol match). */
struct cmd_5tuple_filter_result {
	cmdline_fixed_string_t filter;		/* literal "5tuple_filter" */
	uint8_t port_id;			/* target port */
	cmdline_fixed_string_t ops;		/* "add" or "del" */
	cmdline_fixed_string_t dst_ip;		/* literal "dst_ip" */
	cmdline_ipaddr_t dst_ip_value;		/* destination address */
	cmdline_fixed_string_t src_ip;		/* literal "src_ip" */
	cmdline_ipaddr_t src_ip_value;		/* source address */
	cmdline_fixed_string_t dst_port;	/* literal "dst_port" */
	uint16_t dst_port_value;		/* port, host byte order */
	cmdline_fixed_string_t src_port;	/* literal "src_port" */
	uint16_t src_port_value;		/* port, host byte order */
	cmdline_fixed_string_t protocol;	/* literal "protocol" */
	uint8_t protocol_value;			/* IP protocol number */
	cmdline_fixed_string_t mask;		/* literal "mask" */
	uint8_t mask_value;	/* bit4..bit0: dst_ip, src_ip, dst_port,
				 * src_port, protocol (see handler) */
	cmdline_fixed_string_t tcp_flags;	/* literal "tcp_flags" */
	uint8_t tcp_flags_value;		/* TCP flags to match, 0 = none */
	cmdline_fixed_string_t priority;	/* literal "priority" */
	uint8_t priority_value;			/* filter priority */
	cmdline_fixed_string_t queue;		/* literal "queue" */
	uint16_t queue_id;			/* destination RX queue */
};
+
/*
 * Handler for the "5tuple_filter" command: add or remove an ntuple filter
 * matching on dst/src IP, dst/src port and protocol, as selected by the
 * mask bits.  Only IPv4 addresses are supported for enabled IP fields.
 */
static void
cmd_5tuple_filter_parsed(void *parsed_result,
			__attribute__((unused)) struct cmdline *cl,
			__attribute__((unused)) void *data)
{
	struct rte_eth_ntuple_filter filter;
	struct cmd_5tuple_filter_result *res = parsed_result;
	int ret = 0;

	ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_NTUPLE);
	if (ret < 0) {
		printf("ntuple filter is not supported on port %u.\n",
			res->port_id);
		return;
	}

	memset(&filter, 0, sizeof(struct rte_eth_ntuple_filter));

	filter.flags = RTE_5TUPLE_FLAGS;
	/* each mask bit enables matching on one of the five fields */
	filter.dst_ip_mask = (res->mask_value & 0x10) ? UINT32_MAX : 0;
	filter.src_ip_mask = (res->mask_value & 0x08) ? UINT32_MAX : 0;
	filter.dst_port_mask = (res->mask_value & 0x04) ? UINT16_MAX : 0;
	filter.src_port_mask = (res->mask_value & 0x02) ? UINT16_MAX : 0;
	filter.proto_mask = (res->mask_value & 0x01) ? UINT8_MAX : 0;
	filter.proto = res->protocol_value;
	filter.priority = res->priority_value;
	if (res->tcp_flags_value != 0 && filter.proto != IPPROTO_TCP) {
		printf("nonzero tcp_flags is only meaningful"
			" when protocol is TCP.\n");
		return;
	}
	if (res->tcp_flags_value > TCP_FLAG_ALL) {
		printf("invalid TCP flags.\n");
		return;
	}

	if (res->tcp_flags_value != 0) {
		filter.flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		filter.tcp_flags = res->tcp_flags_value;
	}

	/* an IPv6 address is tolerated only when the field is masked out */
	if (res->dst_ip_value.family == AF_INET)
		/* no need to convert, already big endian. */
		filter.dst_ip = res->dst_ip_value.addr.ipv4.s_addr;
	else {
		if (filter.dst_ip_mask == 0) {
			printf("can not support ipv6 involved compare.\n");
			return;
		}
		filter.dst_ip = 0;
	}

	if (res->src_ip_value.family == AF_INET)
		/* no need to convert, already big endian. */
		filter.src_ip = res->src_ip_value.addr.ipv4.s_addr;
	else {
		if (filter.src_ip_mask == 0) {
			printf("can not support ipv6 involved compare.\n");
			return;
		}
		filter.src_ip = 0;
	}
	/* need convert to big endian. */
	filter.dst_port = rte_cpu_to_be_16(res->dst_port_value);
	filter.src_port = rte_cpu_to_be_16(res->src_port_value);
	filter.queue = res->queue_id;

	if (!strcmp(res->ops, "add"))
		ret = rte_eth_dev_filter_ctrl(res->port_id,
				RTE_ETH_FILTER_NTUPLE,
				RTE_ETH_FILTER_ADD,
				&filter);
	else
		ret = rte_eth_dev_filter_ctrl(res->port_id,
				RTE_ETH_FILTER_NTUPLE,
				RTE_ETH_FILTER_DELETE,
				&filter);
	if (ret < 0)
		printf("5tuple filter programming error: (%s)\n",
			strerror(-ret));
}
+
/* Parser tokens and registration for the 5tuple_filter command. */
cmdline_parse_token_string_t cmd_5tuple_filter_filter =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				 filter, "5tuple_filter");
cmdline_parse_token_num_t cmd_5tuple_filter_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				port_id, UINT8);
cmdline_parse_token_string_t cmd_5tuple_filter_ops =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				 ops, "add#del");
cmdline_parse_token_string_t cmd_5tuple_filter_dst_ip =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				dst_ip, "dst_ip");
cmdline_parse_token_ipaddr_t cmd_5tuple_filter_dst_ip_value =
	TOKEN_IPADDR_INITIALIZER(struct cmd_5tuple_filter_result,
				dst_ip_value);
cmdline_parse_token_string_t cmd_5tuple_filter_src_ip =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				src_ip, "src_ip");
cmdline_parse_token_ipaddr_t cmd_5tuple_filter_src_ip_value =
	TOKEN_IPADDR_INITIALIZER(struct cmd_5tuple_filter_result,
				src_ip_value);
cmdline_parse_token_string_t cmd_5tuple_filter_dst_port =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				dst_port, "dst_port");
cmdline_parse_token_num_t cmd_5tuple_filter_dst_port_value =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				dst_port_value, UINT16);
cmdline_parse_token_string_t cmd_5tuple_filter_src_port =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				src_port, "src_port");
cmdline_parse_token_num_t cmd_5tuple_filter_src_port_value =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				src_port_value, UINT16);
cmdline_parse_token_string_t cmd_5tuple_filter_protocol =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				protocol, "protocol");
cmdline_parse_token_num_t cmd_5tuple_filter_protocol_value =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				protocol_value, UINT8);
cmdline_parse_token_string_t cmd_5tuple_filter_mask =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				mask, "mask");
/* NOTE(review): mask_value is declared uint8_t but parsed as INT8;
 * verify whether mask values above 0x7f are intended to be accepted. */
cmdline_parse_token_num_t cmd_5tuple_filter_mask_value =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				mask_value, INT8);
cmdline_parse_token_string_t cmd_5tuple_filter_tcp_flags =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				tcp_flags, "tcp_flags");
cmdline_parse_token_num_t cmd_5tuple_filter_tcp_flags_value =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				tcp_flags_value, UINT8);
cmdline_parse_token_string_t cmd_5tuple_filter_priority =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				priority, "priority");
cmdline_parse_token_num_t cmd_5tuple_filter_priority_value =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				priority_value, UINT8);
cmdline_parse_token_string_t cmd_5tuple_filter_queue =
	TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
				queue, "queue");
cmdline_parse_token_num_t cmd_5tuple_filter_queue_id =
	TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
				queue_id, UINT16);

cmdline_parse_inst_t cmd_5tuple_filter = {
	.f = cmd_5tuple_filter_parsed,
	.data = NULL,
	.help_str = "add/del a 5tuple filter",
	.tokens = {
		(void *)&cmd_5tuple_filter_filter,
		(void *)&cmd_5tuple_filter_port_id,
		(void *)&cmd_5tuple_filter_ops,
		(void *)&cmd_5tuple_filter_dst_ip,
		(void *)&cmd_5tuple_filter_dst_ip_value,
		(void *)&cmd_5tuple_filter_src_ip,
		(void *)&cmd_5tuple_filter_src_ip_value,
		(void *)&cmd_5tuple_filter_dst_port,
		(void *)&cmd_5tuple_filter_dst_port_value,
		(void *)&cmd_5tuple_filter_src_port,
		(void *)&cmd_5tuple_filter_src_port_value,
		(void *)&cmd_5tuple_filter_protocol,
		(void *)&cmd_5tuple_filter_protocol_value,
		(void *)&cmd_5tuple_filter_mask,
		(void *)&cmd_5tuple_filter_mask_value,
		(void *)&cmd_5tuple_filter_tcp_flags,
		(void *)&cmd_5tuple_filter_tcp_flags_value,
		(void *)&cmd_5tuple_filter_priority,
		(void *)&cmd_5tuple_filter_priority_value,
		(void *)&cmd_5tuple_filter_queue,
		(void *)&cmd_5tuple_filter_queue_id,
		NULL,
	},
};
+
+/* *** ADD/REMOVE A flex FILTER *** */
/* Parsed result for the flex_filter command: match <len_value> bytes of
 * the packet against a hex byte pattern under a hex bit mask. */
struct cmd_flex_filter_result {
	cmdline_fixed_string_t filter;		/* literal "flex_filter" */
	cmdline_fixed_string_t ops;		/* "add" or "del" */
	uint8_t port_id;			/* target port */
	cmdline_fixed_string_t len;		/* literal "len" */
	uint8_t len_value;	/* pattern length, <= RTE_FLEX_FILTER_MAXLEN */
	cmdline_fixed_string_t bytes;		/* literal "bytes" */
	cmdline_fixed_string_t bytes_value;	/* hex pattern, optional 0x prefix */
	cmdline_fixed_string_t mask;		/* literal "mask" */
	cmdline_fixed_string_t mask_value;	/* hex bit mask, optional 0x prefix */
	cmdline_fixed_string_t priority;	/* literal "priority" */
	uint8_t priority_value;			/* filter priority */
	cmdline_fixed_string_t queue;		/* literal "queue" */
	uint16_t queue_id;			/* destination RX queue */
};
+
/* Convert one hexadecimal digit character to its numeric value (0-15).
 * The caller must already have checked isxdigit(c). */
static int xdigit2val(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	if (isupper(c))
		return c - 'A' + 10;
	return c - 'a' + 10;
}
+
+static void
+cmd_flex_filter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int ret = 0;
+ struct rte_eth_flex_filter filter;
+ struct cmd_flex_filter_result *res = parsed_result;
+ char *bytes_ptr, *mask_ptr;
+ uint16_t len, i, j = 0;
+ char c;
+ int val;
+ uint8_t byte = 0;
+
+ if (res->len_value > RTE_FLEX_FILTER_MAXLEN) {
+ printf("the len exceed the max length 128\n");
+ return;
+ }
+ memset(&filter, 0, sizeof(struct rte_eth_flex_filter));
+ filter.len = res->len_value;
+ filter.priority = res->priority_value;
+ filter.queue = res->queue_id;
+ bytes_ptr = res->bytes_value;
+ mask_ptr = res->mask_value;
+
+ /* translate bytes string to array. */
+ if (bytes_ptr[0] == '0' && ((bytes_ptr[1] == 'x') ||
+ (bytes_ptr[1] == 'X')))
+ bytes_ptr += 2;
+ len = strnlen(bytes_ptr, res->len_value * 2);
+ if (len == 0 || (len % 8 != 0)) {
+ printf("please check len and bytes input\n");
+ return;
+ }
+ for (i = 0; i < len; i++) {
+ c = bytes_ptr[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters. */
+ printf("invalid input\n");
+ return;
+ }
+ val = xdigit2val(c);
+ if (i % 2) {
+ byte |= val;
+ filter.bytes[j] = byte;
+ printf("bytes[%d]:%02x ", j, filter.bytes[j]);
+ j++;
+ byte = 0;
+ } else
+ byte |= val << 4;
+ }
+ printf("\n");
+ /* translate mask string to uint8_t array. */
+ if (mask_ptr[0] == '0' && ((mask_ptr[1] == 'x') ||
+ (mask_ptr[1] == 'X')))
+ mask_ptr += 2;
+ len = strnlen(mask_ptr, (res->len_value + 3) / 4);
+ if (len == 0) {
+ printf("invalid input\n");
+ return;
+ }
+ j = 0;
+ byte = 0;
+ for (i = 0; i < len; i++) {
+ c = mask_ptr[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters. */
+ printf("invalid input\n");
+ return;
+ }
+ val = xdigit2val(c);
+ if (i % 2) {
+ byte |= val;
+ filter.mask[j] = byte;
+ printf("mask[%d]:%02x ", j, filter.mask[j]);
+ j++;
+ byte = 0;
+ } else
+ byte |= val << 4;
+ }
+ printf("\n");
+
+ if (!strcmp(res->ops, "add"))
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_FLEXIBLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+ else
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_FLEXIBLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+
+ if (ret < 0)
+ printf("flex filter setting error: (%s)\n", strerror(-ret));
+}
+
/* Parser tokens and registration for the flex_filter command. */
cmdline_parse_token_string_t cmd_flex_filter_filter =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				filter, "flex_filter");
cmdline_parse_token_num_t cmd_flex_filter_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result,
				port_id, UINT8);
cmdline_parse_token_string_t cmd_flex_filter_ops =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				ops, "add#del");
cmdline_parse_token_string_t cmd_flex_filter_len =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				len, "len");
cmdline_parse_token_num_t cmd_flex_filter_len_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result,
				len_value, UINT8);
cmdline_parse_token_string_t cmd_flex_filter_bytes =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				bytes, "bytes");
/* free-form hex strings, validated by the handler */
cmdline_parse_token_string_t cmd_flex_filter_bytes_value =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				bytes_value, NULL);
cmdline_parse_token_string_t cmd_flex_filter_mask =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				mask, "mask");
cmdline_parse_token_string_t cmd_flex_filter_mask_value =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				mask_value, NULL);
cmdline_parse_token_string_t cmd_flex_filter_priority =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				priority, "priority");
cmdline_parse_token_num_t cmd_flex_filter_priority_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result,
				priority_value, UINT8);
cmdline_parse_token_string_t cmd_flex_filter_queue =
	TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
				queue, "queue");
cmdline_parse_token_num_t cmd_flex_filter_queue_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result,
				queue_id, UINT16);
cmdline_parse_inst_t cmd_flex_filter = {
	.f = cmd_flex_filter_parsed,
	.data = NULL,
	.help_str = "add/del a flex filter",
	.tokens = {
		(void *)&cmd_flex_filter_filter,
		(void *)&cmd_flex_filter_port_id,
		(void *)&cmd_flex_filter_ops,
		(void *)&cmd_flex_filter_len,
		(void *)&cmd_flex_filter_len_value,
		(void *)&cmd_flex_filter_bytes,
		(void *)&cmd_flex_filter_bytes_value,
		(void *)&cmd_flex_filter_mask,
		(void *)&cmd_flex_filter_mask_value,
		(void *)&cmd_flex_filter_priority,
		(void *)&cmd_flex_filter_priority_value,
		(void *)&cmd_flex_filter_queue,
		(void *)&cmd_flex_filter_queue_id,
		NULL,
	},
};
+
+/* *** Filters Control *** */
+
+/* *** deal with ethertype filter *** */
/* Parsed result for "ethertype_filter <port> add|del mac_addr|mac_ignr
 * <mac> ethertype <type> drop|fwd queue <q>". */
struct cmd_ethertype_filter_result {
	cmdline_fixed_string_t filter;		/* literal "ethertype_filter" */
	uint8_t port_id;			/* target port */
	cmdline_fixed_string_t ops;		/* "add" or "del" */
	cmdline_fixed_string_t mac;		/* "mac_addr" (match) or "mac_ignr" */
	struct ether_addr mac_addr;		/* MAC, used only with "mac_addr" */
	cmdline_fixed_string_t ethertype;	/* literal "ethertype" */
	uint16_t ethertype_value;		/* ether type to match */
	cmdline_fixed_string_t drop;		/* "drop" or "fwd" */
	cmdline_fixed_string_t queue;		/* literal "queue" */
	uint16_t queue_id;			/* destination RX queue */
};
+
/* Parser tokens for the ethertype_filter command. */
cmdline_parse_token_string_t cmd_ethertype_filter_filter =
	TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result,
				 filter, "ethertype_filter");
cmdline_parse_token_num_t cmd_ethertype_filter_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_ethertype_filter_result,
			      port_id, UINT8);
cmdline_parse_token_string_t cmd_ethertype_filter_ops =
	TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result,
				 ops, "add#del");
cmdline_parse_token_string_t cmd_ethertype_filter_mac =
	TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result,
				 mac, "mac_addr#mac_ignr");
cmdline_parse_token_etheraddr_t cmd_ethertype_filter_mac_addr =
	TOKEN_ETHERADDR_INITIALIZER(struct cmd_ethertype_filter_result,
				     mac_addr);
cmdline_parse_token_string_t cmd_ethertype_filter_ethertype =
	TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result,
				 ethertype, "ethertype");
cmdline_parse_token_num_t cmd_ethertype_filter_ethertype_value =
	TOKEN_NUM_INITIALIZER(struct cmd_ethertype_filter_result,
			      ethertype_value, UINT16);
cmdline_parse_token_string_t cmd_ethertype_filter_drop =
	TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result,
				 drop, "drop#fwd");
cmdline_parse_token_string_t cmd_ethertype_filter_queue =
	TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result,
				 queue, "queue");
cmdline_parse_token_num_t cmd_ethertype_filter_queue_id =
	TOKEN_NUM_INITIALIZER(struct cmd_ethertype_filter_result,
			      queue_id, UINT16);
+
+static void
+cmd_ethertype_filter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ethertype_filter_result *res = parsed_result;
+ struct rte_eth_ethertype_filter filter;
+ int ret = 0;
+
+ ret = rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_ETHERTYPE);
+ if (ret < 0) {
+ printf("ethertype filter is not supported on port %u.\n",
+ res->port_id);
+ return;
+ }
+
+ memset(&filter, 0, sizeof(filter));
+ if (!strcmp(res->mac, "mac_addr")) {
+ filter.flags |= RTE_ETHTYPE_FLAGS_MAC;
+ (void)rte_memcpy(&filter.mac_addr, &res->mac_addr,
+ sizeof(struct ether_addr));
+ }
+ if (!strcmp(res->drop, "drop"))
+ filter.flags |= RTE_ETHTYPE_FLAGS_DROP;
+ filter.ether_type = res->ethertype_value;
+ filter.queue = res->queue_id;
+
+ if (!strcmp(res->ops, "add"))
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+ else
+ ret = rte_eth_dev_filter_ctrl(res->port_id,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+ if (ret < 0)
+ printf("ethertype filter programming error: (%s)\n",
+ strerror(-ret));
+}
+
/* Registration of the ethertype_filter command. */
cmdline_parse_inst_t cmd_ethertype_filter = {
	.f = cmd_ethertype_filter_parsed,
	.data = NULL,
	.help_str = "add or delete an ethertype filter entry",
	.tokens = {
		(void *)&cmd_ethertype_filter_filter,
		(void *)&cmd_ethertype_filter_port_id,
		(void *)&cmd_ethertype_filter_ops,
		(void *)&cmd_ethertype_filter_mac,
		(void *)&cmd_ethertype_filter_mac_addr,
		(void *)&cmd_ethertype_filter_ethertype,
		(void *)&cmd_ethertype_filter_ethertype_value,
		(void *)&cmd_ethertype_filter_drop,
		(void *)&cmd_ethertype_filter_queue,
		(void *)&cmd_ethertype_filter_queue_id,
		NULL,
	},
};
+
/* *** deal with flow director filter *** */
/*
 * Parse result shared by every flow_director_filter command variant
 * (IP, UDP/TCP, SCTP, L2, MAC-VLAN, Tunnel).  Each variant fills only
 * the members its token list contains; the rest stay unparsed.
 */
struct cmd_flow_director_result {
	cmdline_fixed_string_t flow_director_filter;	/* command keyword */
	uint8_t port_id;
	cmdline_fixed_string_t mode;			/* literal "mode" */
	cmdline_fixed_string_t mode_value;		/* "IP"/"MAC-VLAN"/"Tunnel" */
	cmdline_fixed_string_t ops;			/* "add"/"del"/"update" */
	cmdline_fixed_string_t flow;			/* literal "flow" */
	cmdline_fixed_string_t flow_type;		/* e.g. "ipv4-tcp" */
	cmdline_fixed_string_t ether;			/* literal "ether" (L2 only) */
	uint16_t ether_type;
	cmdline_fixed_string_t src;			/* literal "src" */
	cmdline_ipaddr_t ip_src;
	uint16_t port_src;
	cmdline_fixed_string_t dst;			/* literal "dst" */
	cmdline_ipaddr_t ip_dst;
	uint16_t port_dst;
	cmdline_fixed_string_t verify_tag;		/* literal (SCTP only) */
	uint32_t verify_tag_value;
	cmdline_ipaddr_t tos;				/* keyword slot for "tos" */
	uint8_t tos_value;
	cmdline_ipaddr_t proto;				/* keyword slot for "proto" */
	uint8_t proto_value;
	cmdline_ipaddr_t ttl;				/* keyword slot for "ttl" */
	uint8_t ttl_value;
	cmdline_fixed_string_t vlan;			/* literal "vlan" */
	uint16_t vlan_value;
	cmdline_fixed_string_t flexbytes;		/* literal "flexbytes" */
	cmdline_fixed_string_t flexbytes_value;		/* "(b0,b1,...)" list */
	cmdline_fixed_string_t pf_vf;			/* "pf" or "vfN" */
	cmdline_fixed_string_t drop;			/* "drop" or "fwd" */
	cmdline_fixed_string_t queue;			/* literal "queue" */
	uint16_t queue_id;
	cmdline_fixed_string_t fd_id;			/* literal "fd_id" */
	uint32_t fd_id_value;
	cmdline_fixed_string_t mac;			/* literal "mac" */
	struct ether_addr mac_addr;
	cmdline_fixed_string_t tunnel;			/* literal "tunnel" */
	cmdline_fixed_string_t tunnel_type;		/* "NVGRE"/"VxLAN" */
	cmdline_fixed_string_t tunnel_id;		/* literal "tunnel-id" */
	uint32_t tunnel_id_value;
};
+
/*
 * Parse a "(b0,b1,...)" byte list into flexbytes[].
 *
 * Everything between the first '(' and the following ')' in q_arg is
 * split on ',' and each field converted as an unsigned byte (base
 * auto-detected by strtoul, so "0x1f" works).
 *
 * @param q_arg     input string containing the parenthesised list
 * @param flexbytes output array with room for at least max_num bytes
 * @param max_num   maximum number of bytes accepted
 * @return number of bytes parsed, or -1 on missing parentheses,
 *         overlong payload, too many fields, trailing garbage in a
 *         field or a value above UINT8_MAX
 */
static inline int
parse_flexbytes(const char *q_arg, uint8_t *flexbytes, uint16_t max_num)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	unsigned long int_fld;
	char *str_fld[max_num];
	int i;
	unsigned size;
	int ret;

	/* isolate the "(...)" payload */
	p = strchr(p0, '(');
	if (p == NULL)
		return -1;
	++p;
	p0 = strchr(p, ')');
	if (p0 == NULL)
		return -1;

	size = p0 - p;
	if (size >= sizeof(s))
		return -1;

	/*
	 * Copy into a writable buffer.  The "%.*s" precision argument must
	 * be an int; passing the unsigned 'size' directly is a format-type
	 * mismatch, hence the explicit cast.
	 */
	snprintf(s, sizeof(s), "%.*s", (int)size, p);
	ret = rte_strsplit(s, sizeof(s), str_fld, max_num, ',');
	if (ret < 0 || ret > max_num)
		return -1;
	for (i = 0; i < ret; i++) {
		errno = 0;
		int_fld = strtoul(str_fld[i], &end, 0);
		/* reject conversion errors, trailing garbage and values > 255 */
		if (errno != 0 || *end != '\0' || int_fld > UINT8_MAX)
			return -1;
		flexbytes[i] = (uint8_t)int_fld;
	}
	return ret;
}
+
+static uint16_t
+str2flowtype(char *string)
+{
+ uint8_t i = 0;
+ static const struct {
+ char str[32];
+ uint16_t type;
+ } flowtype_str[] = {
+ {"raw", RTE_ETH_FLOW_RAW},
+ {"ipv4", RTE_ETH_FLOW_IPV4},
+ {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
+ {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
+ {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
+ {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
+ {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
+ {"ipv6", RTE_ETH_FLOW_IPV6},
+ {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
+ {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
+ {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
+ {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
+ {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
+ {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+ };
+
+ for (i = 0; i < RTE_DIM(flowtype_str); i++) {
+ if (!strcmp(flowtype_str[i].str, string))
+ return flowtype_str[i].type;
+ }
+ return RTE_ETH_FLOW_UNKNOWN;
+}
+
+static enum rte_eth_fdir_tunnel_type
+str2fdir_tunneltype(char *string)
+{
+ uint8_t i = 0;
+
+ static const struct {
+ char str[32];
+ enum rte_eth_fdir_tunnel_type type;
+ } tunneltype_str[] = {
+ {"NVGRE", RTE_FDIR_TUNNEL_TYPE_NVGRE},
+ {"VxLAN", RTE_FDIR_TUNNEL_TYPE_VXLAN},
+ };
+
+ for (i = 0; i < RTE_DIM(tunneltype_str); i++) {
+ if (!strcmp(tunneltype_str[i].str, string))
+ return tunneltype_str[i].type;
+ }
+ return RTE_FDIR_TUNNEL_TYPE_UNKNOWN;
+}
+
/*
 * Copy a parsed IPv4 address into the scalar (ip).  If the parsed
 * address is not AF_INET, print an error and *return from the calling
 * function* — note the hidden control flow inside the macro.
 */
#define IPV4_ADDR_TO_UINT(ip_addr, ip) \
do { \
	if ((ip_addr).family == AF_INET) \
		(ip) = (ip_addr).addr.ipv4.s_addr; \
	else { \
		printf("invalid parameter.\n"); \
		return; \
	} \
} while (0)
+
/*
 * Copy a parsed IPv6 address into the 16-byte array (ip).  If the
 * parsed address is not AF_INET6, print an error and *return from the
 * calling function* — note the hidden control flow inside the macro.
 */
#define IPV6_ADDR_TO_ARRAY(ip_addr, ip) \
do { \
	if ((ip_addr).family == AF_INET6) \
		(void)rte_memcpy(&(ip), \
				 &((ip_addr).addr.ipv6), \
				 sizeof(struct in6_addr)); \
	else { \
		printf("invalid parameter.\n"); \
		return; \
	} \
} while (0)
+
/*
 * Handler for all flow_director_filter command variants: build an
 * rte_eth_fdir_filter from the parsed tokens and add/delete/update it
 * on the port.  The "mode" keyword typed by the user must match the
 * flow director mode the port was configured with (fdir_conf.mode).
 * Errors are reported on stdout.
 */
static void
cmd_flow_director_filter_parsed(void *parsed_result,
			__attribute__((unused)) struct cmdline *cl,
			__attribute__((unused)) void *data)
{
	struct cmd_flow_director_result *res = parsed_result;
	struct rte_eth_fdir_filter entry;
	uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
	char *end;
	unsigned long vf_id;
	int ret = 0;

	ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("flow director is not supported on port %u.\n",
			res->port_id);
		return;
	}
	memset(flexbytes, 0, sizeof(flexbytes));
	memset(&entry, 0, sizeof(struct rte_eth_fdir_filter));

	/* Enforce agreement between the typed mode and the configured one;
	 * only IP mode carries a flow type. */
	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		if (strcmp(res->mode_value, "MAC-VLAN")) {
			printf("Please set mode to MAC-VLAN.\n");
			return;
		}
	} else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		if (strcmp(res->mode_value, "Tunnel")) {
			printf("Please set mode to Tunnel.\n");
			return;
		}
	} else {
		if (strcmp(res->mode_value, "IP")) {
			printf("Please set mode to IP.\n");
			return;
		}
		entry.input.flow_type = str2flowtype(res->flow_type);
	}

	ret = parse_flexbytes(res->flexbytes_value,
					flexbytes,
					RTE_ETH_FDIR_MAX_FLEXLEN);
	if (ret < 0) {
		printf("error: Cannot parse flexbytes input.\n");
		return;
	}

	/* Fill the per-flow-type key fields.  flow_type stays 0
	 * (RTE_ETH_FLOW_UNKNOWN) in MAC-VLAN/Tunnel mode, so the switch
	 * falls into the default branch there. */
	switch (entry.input.flow_type) {
	case RTE_ETH_FLOW_FRAG_IPV4:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		entry.input.flow.ip4_flow.proto = res->proto_value;
		/* fall through: address/tos/ttl/port setup shared with UDP/TCP */
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		IPV4_ADDR_TO_UINT(res->ip_dst,
			entry.input.flow.ip4_flow.dst_ip);
		IPV4_ADDR_TO_UINT(res->ip_src,
			entry.input.flow.ip4_flow.src_ip);
		entry.input.flow.ip4_flow.tos = res->tos_value;
		entry.input.flow.ip4_flow.ttl = res->ttl_value;
		/* need convert to big endian. */
		/* NOTE(review): ports are written via udp4_flow even for the
		 * frag/other cases above; this relies on the union layout
		 * placing the ports after the ip4 fields — confirm. */
		entry.input.flow.udp4_flow.dst_port =
				rte_cpu_to_be_16(res->port_dst);
		entry.input.flow.udp4_flow.src_port =
				rte_cpu_to_be_16(res->port_src);
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		IPV4_ADDR_TO_UINT(res->ip_dst,
			entry.input.flow.sctp4_flow.ip.dst_ip);
		IPV4_ADDR_TO_UINT(res->ip_src,
			entry.input.flow.sctp4_flow.ip.src_ip);
		entry.input.flow.ip4_flow.tos = res->tos_value;
		entry.input.flow.ip4_flow.ttl = res->ttl_value;
		/* need convert to big endian. */
		entry.input.flow.sctp4_flow.dst_port =
				rte_cpu_to_be_16(res->port_dst);
		entry.input.flow.sctp4_flow.src_port =
				rte_cpu_to_be_16(res->port_src);
		entry.input.flow.sctp4_flow.verify_tag =
				rte_cpu_to_be_32(res->verify_tag_value);
		break;
	case RTE_ETH_FLOW_FRAG_IPV6:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		entry.input.flow.ipv6_flow.proto = res->proto_value;
		/* fall through: address/tc/hop-limit/port setup shared with UDP/TCP */
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		IPV6_ADDR_TO_ARRAY(res->ip_dst,
			entry.input.flow.ipv6_flow.dst_ip);
		IPV6_ADDR_TO_ARRAY(res->ip_src,
			entry.input.flow.ipv6_flow.src_ip);
		entry.input.flow.ipv6_flow.tc = res->tos_value;
		entry.input.flow.ipv6_flow.hop_limits = res->ttl_value;
		/* need convert to big endian. */
		entry.input.flow.udp6_flow.dst_port =
				rte_cpu_to_be_16(res->port_dst);
		entry.input.flow.udp6_flow.src_port =
				rte_cpu_to_be_16(res->port_src);
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		IPV6_ADDR_TO_ARRAY(res->ip_dst,
			entry.input.flow.sctp6_flow.ip.dst_ip);
		IPV6_ADDR_TO_ARRAY(res->ip_src,
			entry.input.flow.sctp6_flow.ip.src_ip);
		entry.input.flow.ipv6_flow.tc = res->tos_value;
		entry.input.flow.ipv6_flow.hop_limits = res->ttl_value;
		/* need convert to big endian. */
		entry.input.flow.sctp6_flow.dst_port =
				rte_cpu_to_be_16(res->port_dst);
		entry.input.flow.sctp6_flow.src_port =
				rte_cpu_to_be_16(res->port_src);
		entry.input.flow.sctp6_flow.verify_tag =
				rte_cpu_to_be_32(res->verify_tag_value);
		break;
	case RTE_ETH_FLOW_L2_PAYLOAD:
		entry.input.flow.l2_flow.ether_type =
			rte_cpu_to_be_16(res->ether_type);
		break;
	default:
		break;
	}

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		(void)rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr,
				 &res->mac_addr,
				 sizeof(struct ether_addr));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		(void)rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr,
				 &res->mac_addr,
				 sizeof(struct ether_addr));
		entry.input.flow.tunnel_flow.tunnel_type =
			str2fdir_tunneltype(res->tunnel_type);
		entry.input.flow.tunnel_flow.tunnel_id =
			rte_cpu_to_be_32(res->tunnel_id_value);
	}

	(void)rte_memcpy(entry.input.flow_ext.flexbytes,
		   flexbytes,
		   RTE_ETH_FDIR_MAX_FLEXLEN);

	entry.input.flow_ext.vlan_tci = rte_cpu_to_be_16(res->vlan_value);

	entry.action.flex_off = 0;  /*use 0 by default */
	if (!strcmp(res->drop, "drop"))
		entry.action.behavior = RTE_ETH_FDIR_REJECT;
	else
		entry.action.behavior = RTE_ETH_FDIR_ACCEPT;

	/* "pf" targets the physical function; "vf<N>" targets VF number N,
	 * which must be below the port's max_vfs. */
	if (!strcmp(res->pf_vf, "pf"))
		entry.input.flow_ext.is_vf = 0;
	else if (!strncmp(res->pf_vf, "vf", 2)) {
		struct rte_eth_dev_info dev_info;

		memset(&dev_info, 0, sizeof(dev_info));
		rte_eth_dev_info_get(res->port_id, &dev_info);
		errno = 0;
		vf_id = strtoul(res->pf_vf + 2, &end, 10);
		if (errno != 0 || *end != '\0' || vf_id >= dev_info.max_vfs) {
			printf("invalid parameter %s.\n", res->pf_vf);
			return;
		}
		entry.input.flow_ext.is_vf = 1;
		entry.input.flow_ext.dst_id = (uint16_t)vf_id;
	} else {
		printf("invalid parameter %s.\n", res->pf_vf);
		return;
	}

	/* set to report FD ID by default */
	entry.action.report_status = RTE_ETH_FDIR_REPORT_ID;
	entry.action.rx_queue = res->queue_id;
	entry.soft_id = res->fd_id_value;
	if (!strcmp(res->ops, "add"))
		ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
					     RTE_ETH_FILTER_ADD, &entry);
	else if (!strcmp(res->ops, "del"))
		ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
					     RTE_ETH_FILTER_DELETE, &entry);
	else
		ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
					     RTE_ETH_FILTER_UPDATE, &entry);
	if (ret < 0)
		printf("flow director programming error: (%s)\n",
			strerror(-ret));
}
+
/* Tokens shared by the flow_director_filter command variants. */
cmdline_parse_token_string_t cmd_flow_director_filter =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 flow_director_filter, "flow_director_filter");
cmdline_parse_token_num_t cmd_flow_director_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      port_id, UINT8);
cmdline_parse_token_string_t cmd_flow_director_ops =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 ops, "add#del#update");
cmdline_parse_token_string_t cmd_flow_director_flow =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 flow, "flow");
cmdline_parse_token_string_t cmd_flow_director_flow_type =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
		flow_type, "ipv4-other#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#"
		"ipv6-other#ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#l2_payload");
cmdline_parse_token_string_t cmd_flow_director_ether =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 ether, "ether");
cmdline_parse_token_num_t cmd_flow_director_ether_type =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      ether_type, UINT16);
/* Source / destination address and port tokens. */
cmdline_parse_token_string_t cmd_flow_director_src =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 src, "src");
cmdline_parse_token_ipaddr_t cmd_flow_director_ip_src =
	TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_result,
				 ip_src);
cmdline_parse_token_num_t cmd_flow_director_port_src =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      port_src, UINT16);
cmdline_parse_token_string_t cmd_flow_director_dst =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 dst, "dst");
cmdline_parse_token_ipaddr_t cmd_flow_director_ip_dst =
	TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_result,
				 ip_dst);
cmdline_parse_token_num_t cmd_flow_director_port_dst =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      port_dst, UINT16);
/* SCTP verify tag. */
cmdline_parse_token_string_t cmd_flow_director_verify_tag =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 verify_tag, "verify_tag");
cmdline_parse_token_num_t cmd_flow_director_verify_tag_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      verify_tag_value, UINT32);
/* IP header fields: tos, proto, ttl. */
cmdline_parse_token_string_t cmd_flow_director_tos =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 tos, "tos");
cmdline_parse_token_num_t cmd_flow_director_tos_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      tos_value, UINT8);
cmdline_parse_token_string_t cmd_flow_director_proto =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 proto, "proto");
cmdline_parse_token_num_t cmd_flow_director_proto_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      proto_value, UINT8);
cmdline_parse_token_string_t cmd_flow_director_ttl =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 ttl, "ttl");
cmdline_parse_token_num_t cmd_flow_director_ttl_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      ttl_value, UINT8);
/* VLAN, flexbytes, action and destination tokens. */
cmdline_parse_token_string_t cmd_flow_director_vlan =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 vlan, "vlan");
cmdline_parse_token_num_t cmd_flow_director_vlan_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      vlan_value, UINT16);
cmdline_parse_token_string_t cmd_flow_director_flexbytes =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 flexbytes, "flexbytes");
cmdline_parse_token_string_t cmd_flow_director_flexbytes_value =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
			      flexbytes_value, NULL);
cmdline_parse_token_string_t cmd_flow_director_drop =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 drop, "drop#fwd");
cmdline_parse_token_string_t cmd_flow_director_pf_vf =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
			      pf_vf, NULL);
cmdline_parse_token_string_t cmd_flow_director_queue =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 queue, "queue");
cmdline_parse_token_num_t cmd_flow_director_queue_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      queue_id, UINT16);
cmdline_parse_token_string_t cmd_flow_director_fd_id =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 fd_id, "fd_id");
cmdline_parse_token_num_t cmd_flow_director_fd_id_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      fd_id_value, UINT32);

/* Mode selection tokens ("mode IP" / "mode MAC-VLAN" / "mode Tunnel"). */
cmdline_parse_token_string_t cmd_flow_director_mode =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 mode, "mode");
cmdline_parse_token_string_t cmd_flow_director_mode_ip =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 mode_value, "IP");
cmdline_parse_token_string_t cmd_flow_director_mode_mac_vlan =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 mode_value, "MAC-VLAN");
cmdline_parse_token_string_t cmd_flow_director_mode_tunnel =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 mode_value, "Tunnel");
/* MAC-VLAN / Tunnel mode specific tokens. */
cmdline_parse_token_string_t cmd_flow_director_mac =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 mac, "mac");
cmdline_parse_token_etheraddr_t cmd_flow_director_mac_addr =
	TOKEN_ETHERADDR_INITIALIZER(struct cmd_flow_director_result,
				    mac_addr);
cmdline_parse_token_string_t cmd_flow_director_tunnel =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 tunnel, "tunnel");
cmdline_parse_token_string_t cmd_flow_director_tunnel_type =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 tunnel_type, "NVGRE#VxLAN");
cmdline_parse_token_string_t cmd_flow_director_tunnel_id =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
				 tunnel_id, "tunnel-id");
cmdline_parse_token_num_t cmd_flow_director_tunnel_id_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
			      tunnel_id_value, UINT32);
+
/*
 * Command: flow_director_filter <port> mode IP add|del|update flow
 *          <flow_type> src <ip> dst <ip> tos <v> proto <v> ttl <v>
 *          vlan <v> flexbytes (b,b,...) drop|fwd pf|vfN queue <q>
 *          fd_id <v>
 * IP-mode variant without L4 ports (frag/other flow types).
 */
cmdline_parse_inst_t cmd_add_del_ip_flow_director = {
	.f = cmd_flow_director_filter_parsed,
	.data = NULL,
	.help_str = "add or delete an ip flow director entry on NIC",
	.tokens = {
		(void *)&cmd_flow_director_filter,
		(void *)&cmd_flow_director_port_id,
		(void *)&cmd_flow_director_mode,
		(void *)&cmd_flow_director_mode_ip,
		(void *)&cmd_flow_director_ops,
		(void *)&cmd_flow_director_flow,
		(void *)&cmd_flow_director_flow_type,
		(void *)&cmd_flow_director_src,
		(void *)&cmd_flow_director_ip_src,
		(void *)&cmd_flow_director_dst,
		(void *)&cmd_flow_director_ip_dst,
		(void *)&cmd_flow_director_tos,
		(void *)&cmd_flow_director_tos_value,
		(void *)&cmd_flow_director_proto,
		(void *)&cmd_flow_director_proto_value,
		(void *)&cmd_flow_director_ttl,
		(void *)&cmd_flow_director_ttl_value,
		(void *)&cmd_flow_director_vlan,
		(void *)&cmd_flow_director_vlan_value,
		(void *)&cmd_flow_director_flexbytes,
		(void *)&cmd_flow_director_flexbytes_value,
		(void *)&cmd_flow_director_drop,
		(void *)&cmd_flow_director_pf_vf,
		(void *)&cmd_flow_director_queue,
		(void *)&cmd_flow_director_queue_id,
		(void *)&cmd_flow_director_fd_id,
		(void *)&cmd_flow_director_fd_id_value,
		NULL,
	},
};
+
/*
 * UDP/TCP variant: like the IP variant but each address is followed by
 * its L4 port, and there is no proto keyword.
 */
cmdline_parse_inst_t cmd_add_del_udp_flow_director = {
	.f = cmd_flow_director_filter_parsed,
	.data = NULL,
	.help_str = "add or delete an udp/tcp flow director entry on NIC",
	.tokens = {
		(void *)&cmd_flow_director_filter,
		(void *)&cmd_flow_director_port_id,
		(void *)&cmd_flow_director_mode,
		(void *)&cmd_flow_director_mode_ip,
		(void *)&cmd_flow_director_ops,
		(void *)&cmd_flow_director_flow,
		(void *)&cmd_flow_director_flow_type,
		(void *)&cmd_flow_director_src,
		(void *)&cmd_flow_director_ip_src,
		(void *)&cmd_flow_director_port_src,
		(void *)&cmd_flow_director_dst,
		(void *)&cmd_flow_director_ip_dst,
		(void *)&cmd_flow_director_port_dst,
		(void *)&cmd_flow_director_tos,
		(void *)&cmd_flow_director_tos_value,
		(void *)&cmd_flow_director_ttl,
		(void *)&cmd_flow_director_ttl_value,
		(void *)&cmd_flow_director_vlan,
		(void *)&cmd_flow_director_vlan_value,
		(void *)&cmd_flow_director_flexbytes,
		(void *)&cmd_flow_director_flexbytes_value,
		(void *)&cmd_flow_director_drop,
		(void *)&cmd_flow_director_pf_vf,
		(void *)&cmd_flow_director_queue,
		(void *)&cmd_flow_director_queue_id,
		(void *)&cmd_flow_director_fd_id,
		(void *)&cmd_flow_director_fd_id_value,
		NULL,
	},
};
+
+cmdline_parse_inst_t cmd_add_del_sctp_flow_director = {
+ .f = cmd_flow_director_filter_parsed,
+ .data = NULL,
+ .help_str = "add or delete a sctp flow director entry on NIC",
+ .tokens = {
+ (void *)&cmd_flow_director_filter,
+ (void *)&cmd_flow_director_port_id,
+ (void *)&cmd_flow_director_mode,
+ (void *)&cmd_flow_director_mode_ip,
+ (void *)&cmd_flow_director_ops,
+ (void *)&cmd_flow_director_flow,
+ (void *)&cmd_flow_director_flow_type,
+ (void *)&cmd_flow_director_src,
+ (void *)&cmd_flow_director_ip_src,
+ (void *)&cmd_flow_director_port_dst,
+ (void *)&cmd_flow_director_dst,
+ (void *)&cmd_flow_director_ip_dst,
+ (void *)&cmd_flow_director_port_dst,
+ (void *)&cmd_flow_director_verify_tag,
+ (void *)&cmd_flow_director_verify_tag_value,
+ (void *)&cmd_flow_director_tos,
+ (void *)&cmd_flow_director_tos_value,
+ (void *)&cmd_flow_director_ttl,
+ (void *)&cmd_flow_director_ttl_value,
+ (void *)&cmd_flow_director_vlan,
+ (void *)&cmd_flow_director_vlan_value,
+ (void *)&cmd_flow_director_flexbytes,
+ (void *)&cmd_flow_director_flexbytes_value,
+ (void *)&cmd_flow_director_drop,
+ (void *)&cmd_flow_director_pf_vf,
+ (void *)&cmd_flow_director_queue,
+ (void *)&cmd_flow_director_queue_id,
+ (void *)&cmd_flow_director_fd_id,
+ (void *)&cmd_flow_director_fd_id_value,
+ NULL,
+ },
+};
+
/*
 * L2 variant: matches on ethertype instead of IP addresses/ports.
 */
cmdline_parse_inst_t cmd_add_del_l2_flow_director = {
	.f = cmd_flow_director_filter_parsed,
	.data = NULL,
	.help_str = "add or delete a L2 flow director entry on NIC",
	.tokens = {
		(void *)&cmd_flow_director_filter,
		(void *)&cmd_flow_director_port_id,
		(void *)&cmd_flow_director_mode,
		(void *)&cmd_flow_director_mode_ip,
		(void *)&cmd_flow_director_ops,
		(void *)&cmd_flow_director_flow,
		(void *)&cmd_flow_director_flow_type,
		(void *)&cmd_flow_director_ether,
		(void *)&cmd_flow_director_ether_type,
		(void *)&cmd_flow_director_flexbytes,
		(void *)&cmd_flow_director_flexbytes_value,
		(void *)&cmd_flow_director_drop,
		(void *)&cmd_flow_director_pf_vf,
		(void *)&cmd_flow_director_queue,
		(void *)&cmd_flow_director_queue_id,
		(void *)&cmd_flow_director_fd_id,
		(void *)&cmd_flow_director_fd_id_value,
		NULL,
	},
};
+
/*
 * MAC-VLAN mode variant: matches on MAC address + VLAN; no pf/vf token.
 */
cmdline_parse_inst_t cmd_add_del_mac_vlan_flow_director = {
	.f = cmd_flow_director_filter_parsed,
	.data = NULL,
	.help_str = "add or delete a MAC VLAN flow director entry on NIC",
	.tokens = {
		(void *)&cmd_flow_director_filter,
		(void *)&cmd_flow_director_port_id,
		(void *)&cmd_flow_director_mode,
		(void *)&cmd_flow_director_mode_mac_vlan,
		(void *)&cmd_flow_director_ops,
		(void *)&cmd_flow_director_mac,
		(void *)&cmd_flow_director_mac_addr,
		(void *)&cmd_flow_director_vlan,
		(void *)&cmd_flow_director_vlan_value,
		(void *)&cmd_flow_director_flexbytes,
		(void *)&cmd_flow_director_flexbytes_value,
		(void *)&cmd_flow_director_drop,
		(void *)&cmd_flow_director_queue,
		(void *)&cmd_flow_director_queue_id,
		(void *)&cmd_flow_director_fd_id,
		(void *)&cmd_flow_director_fd_id_value,
		NULL,
	},
};
+
/*
 * Tunnel mode variant: MAC + VLAN + tunnel type (NVGRE/VxLAN) + tunnel id.
 */
cmdline_parse_inst_t cmd_add_del_tunnel_flow_director = {
	.f = cmd_flow_director_filter_parsed,
	.data = NULL,
	.help_str = "add or delete a tunnel flow director entry on NIC",
	.tokens = {
		(void *)&cmd_flow_director_filter,
		(void *)&cmd_flow_director_port_id,
		(void *)&cmd_flow_director_mode,
		(void *)&cmd_flow_director_mode_tunnel,
		(void *)&cmd_flow_director_ops,
		(void *)&cmd_flow_director_mac,
		(void *)&cmd_flow_director_mac_addr,
		(void *)&cmd_flow_director_vlan,
		(void *)&cmd_flow_director_vlan_value,
		(void *)&cmd_flow_director_tunnel,
		(void *)&cmd_flow_director_tunnel_type,
		(void *)&cmd_flow_director_tunnel_id,
		(void *)&cmd_flow_director_tunnel_id_value,
		(void *)&cmd_flow_director_flexbytes,
		(void *)&cmd_flow_director_flexbytes_value,
		(void *)&cmd_flow_director_drop,
		(void *)&cmd_flow_director_queue,
		(void *)&cmd_flow_director_queue_id,
		(void *)&cmd_flow_director_fd_id,
		(void *)&cmd_flow_director_fd_id_value,
		NULL,
	},
};
+
/* Parse result and tokens for "flush_flow_director <port_id>". */
struct cmd_flush_flow_director_result {
	cmdline_fixed_string_t flush_flow_director;	/* command keyword */
	uint8_t port_id;				/* target port */
};

cmdline_parse_token_string_t cmd_flush_flow_director_flush =
	TOKEN_STRING_INITIALIZER(struct cmd_flush_flow_director_result,
				 flush_flow_director, "flush_flow_director");
cmdline_parse_token_num_t cmd_flush_flow_director_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flush_flow_director_result,
			      port_id, UINT8);
+
+static void
+cmd_flush_flow_director_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_flow_director_result *res = parsed_result;
+ int ret = 0;
+
+ ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_FDIR);
+ if (ret < 0) {
+ printf("flow director is not supported on port %u.\n",
+ res->port_id);
+ return;
+ }
+
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_FLUSH, NULL);
+ if (ret < 0)
+ printf("flow director table flushing error: (%s)\n",
+ strerror(-ret));
+}
+
/* Command: flush_flow_director <port_id> */
cmdline_parse_inst_t cmd_flush_flow_director = {
	.f = cmd_flush_flow_director_parsed,
	.data = NULL,
	.help_str = "flush all flow director entries of a device on NIC",
	.tokens = {
		(void *)&cmd_flush_flow_director_flush,
		(void *)&cmd_flush_flow_director_port_id,
		NULL,
	},
};
+
/* *** deal with flow director mask *** */
/*
 * Parse result shared by the flow_director_mask command variants
 * (IP, MAC-VLAN and Tunnel mode); each variant fills only the members
 * its token list contains.
 */
struct cmd_flow_director_mask_result {
	cmdline_fixed_string_t flow_director_mask;	/* command keyword */
	uint8_t port_id;
	cmdline_fixed_string_t mode;			/* literal "mode" */
	cmdline_fixed_string_t mode_value;		/* "IP"/"MAC-VLAN"/"Tunnel" */
	cmdline_fixed_string_t vlan;			/* literal "vlan" */
	uint16_t vlan_mask;
	cmdline_fixed_string_t src_mask;		/* literal "src_mask" */
	cmdline_ipaddr_t ipv4_src;
	cmdline_ipaddr_t ipv6_src;
	uint16_t port_src;
	cmdline_fixed_string_t dst_mask;		/* literal "dst_mask" */
	cmdline_ipaddr_t ipv4_dst;
	cmdline_ipaddr_t ipv6_dst;
	uint16_t port_dst;
	cmdline_fixed_string_t mac;			/* literal "mac" */
	uint8_t mac_addr_byte_mask;
	cmdline_fixed_string_t tunnel_id;		/* literal "tunnel-id" */
	uint32_t tunnel_id_mask;
	cmdline_fixed_string_t tunnel_type;		/* literal "tunnel-type" */
	uint8_t tunnel_type_mask;
};
+
+static void
+cmd_flow_director_mask_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_flow_director_mask_result *res = parsed_result;
+ struct rte_eth_fdir_masks *mask;
+ struct rte_port *port;
+
+ if (res->port_id > nb_ports) {
+ printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+ return;
+ }
+
+ port = &ports[res->port_id];
+ /** Check if the port is not started **/
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Please stop port %d first\n", res->port_id);
+ return;
+ }
+
+ mask = &port->dev_conf.fdir_conf.mask;
+
+ if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (strcmp(res->mode_value, "MAC-VLAN")) {
+ printf("Please set mode to MAC-VLAN.\n");
+ return;
+ }
+
+ mask->vlan_tci_mask = res->vlan_mask;
+ mask->mac_addr_byte_mask = res->mac_addr_byte_mask;
+ } else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ if (strcmp(res->mode_value, "Tunnel")) {
+ printf("Please set mode to Tunnel.\n");
+ return;
+ }
+
+ mask->vlan_tci_mask = res->vlan_mask;
+ mask->mac_addr_byte_mask = res->mac_addr_byte_mask;
+ mask->tunnel_id_mask = res->tunnel_id_mask;
+ mask->tunnel_type_mask = res->tunnel_type_mask;
+ } else {
+ if (strcmp(res->mode_value, "IP")) {
+ printf("Please set mode to IP.\n");
+ return;
+ }
+
+ mask->vlan_tci_mask = rte_cpu_to_be_16(res->vlan_mask);
+ IPV4_ADDR_TO_UINT(res->ipv4_src, mask->ipv4_mask.src_ip);
+ IPV4_ADDR_TO_UINT(res->ipv4_dst, mask->ipv4_mask.dst_ip);
+ IPV6_ADDR_TO_ARRAY(res->ipv6_src, mask->ipv6_mask.src_ip);
+ IPV6_ADDR_TO_ARRAY(res->ipv6_dst, mask->ipv6_mask.dst_ip);
+ mask->src_port_mask = rte_cpu_to_be_16(res->port_src);
+ mask->dst_port_mask = rte_cpu_to_be_16(res->port_dst);
+ }
+
+ cmd_reconfig_device_queue(res->port_id, 1, 1);
+}
+
/* Tokens shared by the flow_director_mask command variants. */
cmdline_parse_token_string_t cmd_flow_director_mask =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 flow_director_mask, "flow_director_mask");
cmdline_parse_token_num_t cmd_flow_director_mask_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
			      port_id, UINT8);
cmdline_parse_token_string_t cmd_flow_director_mask_vlan =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 vlan, "vlan");
cmdline_parse_token_num_t cmd_flow_director_mask_vlan_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
			      vlan_mask, UINT16);
/* Source/destination mask tokens (IP mode). */
cmdline_parse_token_string_t cmd_flow_director_mask_src =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 src_mask, "src_mask");
cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv4_src =
	TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result,
				 ipv4_src);
cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv6_src =
	TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result,
				 ipv6_src);
cmdline_parse_token_num_t cmd_flow_director_mask_port_src =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
			      port_src, UINT16);
cmdline_parse_token_string_t cmd_flow_director_mask_dst =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 dst_mask, "dst_mask");
cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv4_dst =
	TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result,
				 ipv4_dst);
cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv6_dst =
	TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result,
				 ipv6_dst);
cmdline_parse_token_num_t cmd_flow_director_mask_port_dst =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
			      port_dst, UINT16);

/* Mode selection tokens. */
cmdline_parse_token_string_t cmd_flow_director_mask_mode =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 mode, "mode");
cmdline_parse_token_string_t cmd_flow_director_mask_mode_ip =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 mode_value, "IP");
cmdline_parse_token_string_t cmd_flow_director_mask_mode_mac_vlan =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 mode_value, "MAC-VLAN");
cmdline_parse_token_string_t cmd_flow_director_mask_mode_tunnel =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 mode_value, "Tunnel");
/* MAC-VLAN / Tunnel mode mask tokens. */
cmdline_parse_token_string_t cmd_flow_director_mask_mac =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 mac, "mac");
cmdline_parse_token_num_t cmd_flow_director_mask_mac_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
			      mac_addr_byte_mask, UINT8);
cmdline_parse_token_string_t cmd_flow_director_mask_tunnel_type =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 tunnel_type, "tunnel-type");
cmdline_parse_token_num_t cmd_flow_director_mask_tunnel_type_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
			      tunnel_type_mask, UINT8);
cmdline_parse_token_string_t cmd_flow_director_mask_tunnel_id =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
				 tunnel_id, "tunnel-id");
cmdline_parse_token_num_t cmd_flow_director_mask_tunnel_id_value =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
			      tunnel_id_mask, UINT32);
+
/*
 * Command: flow_director_mask <port> mode IP vlan <v>
 *          src_mask <ipv4> <ipv6> <port> dst_mask <ipv4> <ipv6> <port>
 */
cmdline_parse_inst_t cmd_set_flow_director_ip_mask = {
	.f = cmd_flow_director_mask_parsed,
	.data = NULL,
	.help_str = "set IP mode flow director's mask on NIC",
	.tokens = {
		(void *)&cmd_flow_director_mask,
		(void *)&cmd_flow_director_mask_port_id,
		(void *)&cmd_flow_director_mask_mode,
		(void *)&cmd_flow_director_mask_mode_ip,
		(void *)&cmd_flow_director_mask_vlan,
		(void *)&cmd_flow_director_mask_vlan_value,
		(void *)&cmd_flow_director_mask_src,
		(void *)&cmd_flow_director_mask_ipv4_src,
		(void *)&cmd_flow_director_mask_ipv6_src,
		(void *)&cmd_flow_director_mask_port_src,
		(void *)&cmd_flow_director_mask_dst,
		(void *)&cmd_flow_director_mask_ipv4_dst,
		(void *)&cmd_flow_director_mask_ipv6_dst,
		(void *)&cmd_flow_director_mask_port_dst,
		NULL,
	},
};
+
/* Command: flow_director_mask <port> mode MAC-VLAN vlan <v> mac <mask> */
cmdline_parse_inst_t cmd_set_flow_director_mac_vlan_mask = {
	.f = cmd_flow_director_mask_parsed,
	.data = NULL,
	.help_str = "set MAC VLAN mode flow director's mask on NIC",
	.tokens = {
		(void *)&cmd_flow_director_mask,
		(void *)&cmd_flow_director_mask_port_id,
		(void *)&cmd_flow_director_mask_mode,
		(void *)&cmd_flow_director_mask_mode_mac_vlan,
		(void *)&cmd_flow_director_mask_vlan,
		(void *)&cmd_flow_director_mask_vlan_value,
		(void *)&cmd_flow_director_mask_mac,
		(void *)&cmd_flow_director_mask_mac_value,
		NULL,
	},
};
+
/*
 * Command: flow_director_mask <port> mode Tunnel vlan <v> mac <mask>
 *          tunnel-type <mask> tunnel-id <mask>
 */
cmdline_parse_inst_t cmd_set_flow_director_tunnel_mask = {
	.f = cmd_flow_director_mask_parsed,
	.data = NULL,
	.help_str = "set tunnel mode flow director's mask on NIC",
	.tokens = {
		(void *)&cmd_flow_director_mask,
		(void *)&cmd_flow_director_mask_port_id,
		(void *)&cmd_flow_director_mask_mode,
		(void *)&cmd_flow_director_mask_mode_tunnel,
		(void *)&cmd_flow_director_mask_vlan,
		(void *)&cmd_flow_director_mask_vlan_value,
		(void *)&cmd_flow_director_mask_mac,
		(void *)&cmd_flow_director_mask_mac_value,
		(void *)&cmd_flow_director_mask_tunnel_type,
		(void *)&cmd_flow_director_mask_tunnel_type_value,
		(void *)&cmd_flow_director_mask_tunnel_id,
		(void *)&cmd_flow_director_mask_tunnel_id_value,
		NULL,
	},
};
+
/* *** deal with flow director mask on flexible payload *** */
/* Parse result for "flow_director_flex_mask <port> flow <type> (mask)". */
struct cmd_flow_director_flex_mask_result {
	cmdline_fixed_string_t flow_director_flexmask;	/* command keyword */
	uint8_t port_id;
	cmdline_fixed_string_t flow;			/* literal "flow" */
	cmdline_fixed_string_t flow_type;		/* name, "none" or "all" */
	cmdline_fixed_string_t mask;			/* "(b0,b1,...)" byte mask */
};
+
+static void
+cmd_flow_director_flex_mask_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_flow_director_flex_mask_result *res = parsed_result;
+ struct rte_eth_fdir_info fdir_info;
+ struct rte_eth_fdir_flex_mask flex_mask;
+ struct rte_port *port;
+ uint32_t flow_type_mask;
+ uint16_t i;
+ int ret;
+
+ if (res->port_id > nb_ports) {
+ printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+ return;
+ }
+
+ port = &ports[res->port_id];
+ /** Check if the port is not started **/
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Please stop port %d first\n", res->port_id);
+ return;
+ }
+
+ memset(&flex_mask, 0, sizeof(struct rte_eth_fdir_flex_mask));
+ ret = parse_flexbytes(res->mask,
+ flex_mask.mask,
+ RTE_ETH_FDIR_MAX_FLEXLEN);
+ if (ret < 0) {
+ printf("error: Cannot parse mask input.\n");
+ return;
+ }
+
+ memset(&fdir_info, 0, sizeof(fdir_info));
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_INFO, &fdir_info);
+ if (ret < 0) {
+ printf("Cannot get FDir filter info\n");
+ return;
+ }
+
+ if (!strcmp(res->flow_type, "none")) {
+ /* means don't specify the flow type */
+ flex_mask.flow_type = RTE_ETH_FLOW_UNKNOWN;
+ for (i = 0; i < RTE_ETH_FLOW_MAX; i++)
+ memset(&port->dev_conf.fdir_conf.flex_conf.flex_mask[i],
+ 0, sizeof(struct rte_eth_fdir_flex_mask));
+ port->dev_conf.fdir_conf.flex_conf.nb_flexmasks = 1;
+ (void)rte_memcpy(&port->dev_conf.fdir_conf.flex_conf.flex_mask[0],
+ &flex_mask,
+ sizeof(struct rte_eth_fdir_flex_mask));
+ cmd_reconfig_device_queue(res->port_id, 1, 1);
+ return;
+ }
+ flow_type_mask = fdir_info.flow_types_mask[0];
+ if (!strcmp(res->flow_type, "all")) {
+ if (!flow_type_mask) {
+ printf("No flow type supported\n");
+ return;
+ }
+ for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
+ if (flow_type_mask & (1 << i)) {
+ flex_mask.flow_type = i;
+ fdir_set_flex_mask(res->port_id, &flex_mask);
+ }
+ }
+ cmd_reconfig_device_queue(res->port_id, 1, 1);
+ return;
+ }
+ flex_mask.flow_type = str2flowtype(res->flow_type);
+ if (!(flow_type_mask & (1 << flex_mask.flow_type))) {
+ printf("Flow type %s not supported on port %d\n",
+ res->flow_type, res->port_id);
+ return;
+ }
+ fdir_set_flex_mask(res->port_id, &flex_mask);
+ cmd_reconfig_device_queue(res->port_id, 1, 1);
+}
+
/* Tokens for "flow_director_flex_mask <port_id> flow <type> <mask>". */
cmdline_parse_token_string_t cmd_flow_director_flexmask =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result,
				 flow_director_flexmask,
				 "flow_director_flex_mask");
cmdline_parse_token_num_t cmd_flow_director_flexmask_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_flex_mask_result,
			      port_id, UINT8);
cmdline_parse_token_string_t cmd_flow_director_flexmask_flow =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result,
				 flow, "flow");
cmdline_parse_token_string_t cmd_flow_director_flexmask_flow_type =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result,
		flow_type, "none#ipv4-other#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#"
		"ipv6-other#ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#l2_payload#all");
cmdline_parse_token_string_t cmd_flow_director_flexmask_mask =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result,
				 mask, NULL); /* NULL: accept any string */

/* Command instance wiring the tokens above to the handler. */
cmdline_parse_inst_t cmd_set_flow_director_flex_mask = {
	.f = cmd_flow_director_flex_mask_parsed,
	.data = NULL,
	.help_str = "set flow director's flex mask on NIC",
	.tokens = {
		(void *)&cmd_flow_director_flexmask,
		(void *)&cmd_flow_director_flexmask_port_id,
		(void *)&cmd_flow_director_flexmask_flow,
		(void *)&cmd_flow_director_flexmask_flow_type,
		(void *)&cmd_flow_director_flexmask_mask,
		NULL,
	},
};
+
+/* *** deal with flow director flexible payload configuration *** */
/* Parsed result for "flow_director_flex_payload <port_id> <layer> <cfg>". */
struct cmd_flow_director_flexpayload_result {
	cmdline_fixed_string_t flow_director_flexpayload; /* literal command word */
	uint8_t port_id;                     /* target port */
	cmdline_fixed_string_t payload_layer; /* raw | l2 | l3 | l4 */
	cmdline_fixed_string_t payload_cfg;  /* "(off0,off1,...)" offset list */
};
+
/*
 * Parse a parenthesized, comma-separated list of unsigned offsets, e.g.
 * "(0,4,8)", into @offsets (at most @max_num entries). Returns the number
 * of offsets parsed, or -1 on any syntax/range error.
 */
static inline int
parse_offsets(const char *q_arg, uint16_t *offsets, uint16_t max_num)
{
	char buf[256];
	char *fields[max_num];
	const char *open_paren, *close_paren;
	char *endp;
	unsigned long val;
	unsigned len;
	int nb_fields;
	int idx;

	/* The list must be enclosed in '(' ... ')'. */
	open_paren = strchr(q_arg, '(');
	if (open_paren == NULL)
		return -1;
	++open_paren;
	close_paren = strchr(open_paren, ')');
	if (close_paren == NULL)
		return -1;

	len = close_paren - open_paren;
	if (len >= sizeof(buf))
		return -1;

	/* Copy the inner text and split it on commas. */
	snprintf(buf, sizeof(buf), "%.*s", len, open_paren);
	nb_fields = rte_strsplit(buf, sizeof(buf), fields, max_num, ',');
	if (nb_fields < 0 || nb_fields > max_num)
		return -1;

	for (idx = 0; idx < nb_fields; idx++) {
		errno = 0;
		val = strtoul(fields[idx], &endp, 0);
		if (errno != 0 || *endp != '\0' || val > UINT16_MAX)
			return -1;
		offsets[idx] = (uint16_t)val;
	}
	return nb_fields;
}
+
+static void
+cmd_flow_director_flxpld_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_flow_director_flexpayload_result *res = parsed_result;
+ struct rte_eth_flex_payload_cfg flex_cfg;
+ struct rte_port *port;
+ int ret = 0;
+
+ if (res->port_id > nb_ports) {
+ printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+ return;
+ }
+
+ port = &ports[res->port_id];
+ /** Check if the port is not started **/
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Please stop port %d first\n", res->port_id);
+ return;
+ }
+
+ memset(&flex_cfg, 0, sizeof(struct rte_eth_flex_payload_cfg));
+
+ if (!strcmp(res->payload_layer, "raw"))
+ flex_cfg.type = RTE_ETH_RAW_PAYLOAD;
+ else if (!strcmp(res->payload_layer, "l2"))
+ flex_cfg.type = RTE_ETH_L2_PAYLOAD;
+ else if (!strcmp(res->payload_layer, "l3"))
+ flex_cfg.type = RTE_ETH_L3_PAYLOAD;
+ else if (!strcmp(res->payload_layer, "l4"))
+ flex_cfg.type = RTE_ETH_L4_PAYLOAD;
+
+ ret = parse_offsets(res->payload_cfg, flex_cfg.src_offset,
+ RTE_ETH_FDIR_MAX_FLEXLEN);
+ if (ret < 0) {
+ printf("error: Cannot parse flex payload input.\n");
+ return;
+ }
+
+ fdir_set_flex_payload(res->port_id, &flex_cfg);
+ cmd_reconfig_device_queue(res->port_id, 1, 1);
+}
+
/* Tokens for "flow_director_flex_payload <port_id> raw|l2|l3|l4 (<offsets>)". */
cmdline_parse_token_string_t cmd_flow_director_flexpayload =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flexpayload_result,
				 flow_director_flexpayload,
				 "flow_director_flex_payload");
cmdline_parse_token_num_t cmd_flow_director_flexpayload_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_flow_director_flexpayload_result,
			      port_id, UINT8);
cmdline_parse_token_string_t cmd_flow_director_flexpayload_payload_layer =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flexpayload_result,
				 payload_layer, "raw#l2#l3#l4");
cmdline_parse_token_string_t cmd_flow_director_flexpayload_payload_cfg =
	TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flexpayload_result,
				 payload_cfg, NULL); /* NULL: accept any string */

/* Command instance wiring the tokens above to the handler. */
cmdline_parse_inst_t cmd_set_flow_director_flex_payload = {
	.f = cmd_flow_director_flxpld_parsed,
	.data = NULL,
	.help_str = "set flow director's flex payload on NIC",
	.tokens = {
		(void *)&cmd_flow_director_flexpayload,
		(void *)&cmd_flow_director_flexpayload_port_id,
		(void *)&cmd_flow_director_flexpayload_payload_layer,
		(void *)&cmd_flow_director_flexpayload_payload_cfg,
		NULL,
	},
};
+
+/* *** Classification Filters Control *** */
+/* *** Get symmetric hash enable per port *** */
/* Parsed result for "get_sym_hash_ena_per_port <port_id>". */
struct cmd_get_sym_hash_ena_per_port_result {
	cmdline_fixed_string_t get_sym_hash_ena_per_port; /* literal command word */
	uint8_t port_id; /* port to query */
};
+
+static void
+cmd_get_sym_hash_per_port_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_get_sym_hash_ena_per_port_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+ int ret;
+
+ if (rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_HASH) < 0) {
+ printf("RTE_ETH_FILTER_HASH not supported on port: %d\n",
+ res->port_id);
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_GET, &info);
+
+ if (ret < 0) {
+ printf("Cannot get symmetric hash enable per port "
+ "on port %u\n", res->port_id);
+ return;
+ }
+
+ printf("Symmetric hash is %s on port %u\n", info.info.enable ?
+ "enabled" : "disabled", res->port_id);
+}
+
/* Tokens and command instance for "get_sym_hash_ena_per_port <port_id>". */
cmdline_parse_token_string_t cmd_get_sym_hash_ena_per_port_all =
	TOKEN_STRING_INITIALIZER(struct cmd_get_sym_hash_ena_per_port_result,
		get_sym_hash_ena_per_port, "get_sym_hash_ena_per_port");
cmdline_parse_token_num_t cmd_get_sym_hash_ena_per_port_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_get_sym_hash_ena_per_port_result,
		port_id, UINT8);

cmdline_parse_inst_t cmd_get_sym_hash_ena_per_port = {
	.f = cmd_get_sym_hash_per_port_parsed,
	.data = NULL,
	.help_str = "get_sym_hash_ena_per_port port_id",
	.tokens = {
		(void *)&cmd_get_sym_hash_ena_per_port_all,
		(void *)&cmd_get_sym_hash_ena_per_port_port_id,
		NULL,
	},
};
+
+/* *** Set symmetric hash enable per port *** */
/* Parsed result for "set_sym_hash_ena_per_port <port_id> enable|disable". */
struct cmd_set_sym_hash_ena_per_port_result {
	cmdline_fixed_string_t set_sym_hash_ena_per_port; /* literal command word */
	cmdline_fixed_string_t enable; /* "enable" or "disable" */
	uint8_t port_id; /* port to configure */
};
+
+static void
+cmd_set_sym_hash_per_port_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_sym_hash_ena_per_port_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+ int ret;
+
+ if (rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_HASH) < 0) {
+ printf("RTE_ETH_FILTER_HASH not supported on port: %d\n",
+ res->port_id);
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
+ if (!strcmp(res->enable, "enable"))
+ info.info.enable = 1;
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+ if (ret < 0) {
+ printf("Cannot set symmetric hash enable per port on "
+ "port %u\n", res->port_id);
+ return;
+ }
+ printf("Symmetric hash has been set to %s on port %u\n",
+ res->enable, res->port_id);
+}
+
/* Tokens and command instance for "set_sym_hash_ena_per_port <port_id>
 * enable|disable". */
cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_all =
	TOKEN_STRING_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
		set_sym_hash_ena_per_port, "set_sym_hash_ena_per_port");
cmdline_parse_token_num_t cmd_set_sym_hash_ena_per_port_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
		port_id, UINT8);
cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_enable =
	TOKEN_STRING_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
		enable, "enable#disable");

cmdline_parse_inst_t cmd_set_sym_hash_ena_per_port = {
	.f = cmd_set_sym_hash_per_port_parsed,
	.data = NULL,
	.help_str = "set_sym_hash_ena_per_port port_id enable|disable",
	.tokens = {
		(void *)&cmd_set_sym_hash_ena_per_port_all,
		(void *)&cmd_set_sym_hash_ena_per_port_port_id,
		(void *)&cmd_set_sym_hash_ena_per_port_enable,
		NULL,
	},
};
+
+/* Get global config of hash function */
/* Parsed result for "get_hash_global_config <port_id>". */
struct cmd_get_hash_global_config_result {
	cmdline_fixed_string_t get_hash_global_config; /* literal command word */
	uint8_t port_id; /* port to query */
};
+
+static char *
+flowtype_to_str(uint16_t ftype)
+{
+ uint16_t i;
+ static struct {
+ char str[16];
+ uint16_t ftype;
+ } ftype_table[] = {
+ {"ipv4", RTE_ETH_FLOW_IPV4},
+ {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
+ {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
+ {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
+ {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
+ {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
+ {"ipv6", RTE_ETH_FLOW_IPV6},
+ {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
+ {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
+ {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
+ {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
+ {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
+ {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+ };
+
+ for (i = 0; i < RTE_DIM(ftype_table); i++) {
+ if (ftype_table[i].ftype == ftype)
+ return ftype_table[i].str;
+ }
+
+ return NULL;
+}
+
+static void
+cmd_get_hash_global_config_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_get_hash_global_config_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+ uint32_t idx, offset;
+ uint16_t i;
+ char *str;
+ int ret;
+
+ if (rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_HASH) < 0) {
+ printf("RTE_ETH_FILTER_HASH not supported on port %d\n",
+ res->port_id);
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_GET, &info);
+ if (ret < 0) {
+ printf("Cannot get hash global configurations by port %d\n",
+ res->port_id);
+ return;
+ }
+
+ switch (info.info.global_conf.hash_func) {
+ case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+ printf("Hash function is Toeplitz\n");
+ break;
+ case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
+ printf("Hash function is Simple XOR\n");
+ break;
+ default:
+ printf("Unknown hash function\n");
+ break;
+ }
+
+ for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
+ idx = i / UINT32_BIT;
+ offset = i % UINT32_BIT;
+ if (!(info.info.global_conf.valid_bit_mask[idx] &
+ (1UL << offset)))
+ continue;
+ str = flowtype_to_str(i);
+ if (!str)
+ continue;
+ printf("Symmetric hash is %s globally for flow type %s "
+ "by port %d\n",
+ ((info.info.global_conf.sym_hash_enable_mask[idx] &
+ (1UL << offset)) ? "enabled" : "disabled"), str,
+ res->port_id);
+ }
+}
+
/* Tokens and command instance for "get_hash_global_config <port_id>". */
cmdline_parse_token_string_t cmd_get_hash_global_config_all =
	TOKEN_STRING_INITIALIZER(struct cmd_get_hash_global_config_result,
		get_hash_global_config, "get_hash_global_config");
cmdline_parse_token_num_t cmd_get_hash_global_config_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_get_hash_global_config_result,
		port_id, UINT8);

cmdline_parse_inst_t cmd_get_hash_global_config = {
	.f = cmd_get_hash_global_config_parsed,
	.data = NULL,
	.help_str = "get_hash_global_config port_id",
	.tokens = {
		(void *)&cmd_get_hash_global_config_all,
		(void *)&cmd_get_hash_global_config_port_id,
		NULL,
	},
};
+
+/* Set global config of hash function */
/* Parsed result for
 * "set_hash_global_config <port_id> <hash_func> <flow_type> enable|disable". */
struct cmd_set_hash_global_config_result {
	cmdline_fixed_string_t set_hash_global_config; /* literal command word */
	uint8_t port_id;                  /* port to configure */
	cmdline_fixed_string_t hash_func; /* toeplitz | simple_xor | default */
	cmdline_fixed_string_t flow_type; /* flow type name */
	cmdline_fixed_string_t enable;    /* "enable" or "disable" sym hash */
};
+
+static void
+cmd_set_hash_global_config_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_global_config_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+ uint32_t ftype, idx, offset;
+ int ret;
+
+ if (rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_HASH) < 0) {
+ printf("RTE_ETH_FILTER_HASH not supported on port %d\n",
+ res->port_id);
+ return;
+ }
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
+ if (!strcmp(res->hash_func, "toeplitz"))
+ info.info.global_conf.hash_func =
+ RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ else if (!strcmp(res->hash_func, "simple_xor"))
+ info.info.global_conf.hash_func =
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ else if (!strcmp(res->hash_func, "default"))
+ info.info.global_conf.hash_func =
+ RTE_ETH_HASH_FUNCTION_DEFAULT;
+
+ ftype = str2flowtype(res->flow_type);
+ idx = ftype / (CHAR_BIT * sizeof(uint32_t));
+ offset = ftype % (CHAR_BIT * sizeof(uint32_t));
+ info.info.global_conf.valid_bit_mask[idx] |= (1UL << offset);
+ if (!strcmp(res->enable, "enable"))
+ info.info.global_conf.sym_hash_enable_mask[idx] |=
+ (1UL << offset);
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+ if (ret < 0)
+ printf("Cannot set global hash configurations by port %d\n",
+ res->port_id);
+ else
+ printf("Global hash configurations have been set "
+ "succcessfully by port %d\n", res->port_id);
+}
+
/* Tokens and command instance for "set_hash_global_config". */
cmdline_parse_token_string_t cmd_set_hash_global_config_all =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
		set_hash_global_config, "set_hash_global_config");
cmdline_parse_token_num_t cmd_set_hash_global_config_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_set_hash_global_config_result,
		port_id, UINT8);
cmdline_parse_token_string_t cmd_set_hash_global_config_hash_func =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
		hash_func, "toeplitz#simple_xor#default");
cmdline_parse_token_string_t cmd_set_hash_global_config_flow_type =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
		flow_type,
		"ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#ipv6#"
		"ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload");
cmdline_parse_token_string_t cmd_set_hash_global_config_enable =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
		enable, "enable#disable");

cmdline_parse_inst_t cmd_set_hash_global_config = {
	.f = cmd_set_hash_global_config_parsed,
	.data = NULL,
	.help_str = "set_hash_global_config port_id "
		"toeplitz|simple_xor|default "
		"ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"
		"ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
		"enable|disable",
	.tokens = {
		(void *)&cmd_set_hash_global_config_all,
		(void *)&cmd_set_hash_global_config_port_id,
		(void *)&cmd_set_hash_global_config_hash_func,
		(void *)&cmd_set_hash_global_config_flow_type,
		(void *)&cmd_set_hash_global_config_enable,
		NULL,
	},
};
+
+/* Set hash input set */
/* Parsed result for
 * "set_hash_input_set <port_id> <flow_type> <field> select|add". */
struct cmd_set_hash_input_set_result {
	cmdline_fixed_string_t set_hash_input_set; /* literal command word */
	uint8_t port_id;                   /* port to configure */
	cmdline_fixed_string_t flow_type;  /* flow type name */
	cmdline_fixed_string_t inset_field; /* input-set field name */
	cmdline_fixed_string_t select;     /* "select" (replace) or "add" */
};
+
+static enum rte_eth_input_set_field
+str2inset(char *string)
+{
+ uint16_t i;
+
+ static const struct {
+ char str[32];
+ enum rte_eth_input_set_field inset;
+ } inset_table[] = {
+ {"ethertype", RTE_ETH_INPUT_SET_L2_ETHERTYPE},
+ {"ovlan", RTE_ETH_INPUT_SET_L2_OUTER_VLAN},
+ {"ivlan", RTE_ETH_INPUT_SET_L2_INNER_VLAN},
+ {"src-ipv4", RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {"dst-ipv4", RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {"ipv4-tos", RTE_ETH_INPUT_SET_L3_IP4_TOS},
+ {"ipv4-proto", RTE_ETH_INPUT_SET_L3_IP4_PROTO},
+ {"ipv4-ttl", RTE_ETH_INPUT_SET_L3_IP4_TTL},
+ {"src-ipv6", RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {"dst-ipv6", RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {"ipv6-tc", RTE_ETH_INPUT_SET_L3_IP6_TC},
+ {"ipv6-next-header", RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER},
+ {"ipv6-hop-limits", RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS},
+ {"udp-src-port", RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+ {"udp-dst-port", RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+ {"tcp-src-port", RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+ {"tcp-dst-port", RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+ {"sctp-src-port", RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+ {"sctp-dst-port", RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+ {"sctp-veri-tag", RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG},
+ {"udp-key", RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY},
+ {"gre-key", RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY},
+ {"fld-1st", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD},
+ {"fld-2nd", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD},
+ {"fld-3rd", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD},
+ {"fld-4th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD},
+ {"fld-5th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD},
+ {"fld-6th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD},
+ {"fld-7th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD},
+ {"fld-8th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD},
+ {"none", RTE_ETH_INPUT_SET_NONE},
+ };
+
+ for (i = 0; i < RTE_DIM(inset_table); i++) {
+ if (!strcmp(string, inset_table[i].str))
+ return inset_table[i].inset;
+ }
+
+ return RTE_ETH_INPUT_SET_UNKNOWN;
+}
+
+static void
+cmd_set_hash_input_set_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_input_set_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = str2flowtype(res->flow_type);
+ info.info.input_set_conf.field[0] = str2inset(res->inset_field);
+ info.info.input_set_conf.inset_size = 1;
+ if (!strcmp(res->select, "select"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ else if (!strcmp(res->select, "add"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD;
+ rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+}
+
/* Tokens and command instance for "set_hash_input_set". */
cmdline_parse_token_string_t cmd_set_hash_input_set_cmd =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
		set_hash_input_set, "set_hash_input_set");
cmdline_parse_token_num_t cmd_set_hash_input_set_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_set_hash_input_set_result,
		port_id, UINT8);
cmdline_parse_token_string_t cmd_set_hash_input_set_flow_type =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
		flow_type,
		"ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#"
		"ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload");
cmdline_parse_token_string_t cmd_set_hash_input_set_field =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
		inset_field,
		"ovlan#ivlan#src-ipv4#dst-ipv4#src-ipv6#dst-ipv6#"
		"ipv4-tos#ipv4-proto#ipv6-tc#ipv6-next-header#udp-src-port#"
		"udp-dst-port#tcp-src-port#tcp-dst-port#sctp-src-port#"
		"sctp-dst-port#sctp-veri-tag#udp-key#gre-key#fld-1st#"
		"fld-2nd#fld-3rd#fld-4th#fld-5th#fld-6th#fld-7th#"
		"fld-8th#none");
cmdline_parse_token_string_t cmd_set_hash_input_set_select =
	TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
		select, "select#add");

cmdline_parse_inst_t cmd_set_hash_input_set = {
	.f = cmd_set_hash_input_set_parsed,
	.data = NULL,
	.help_str = "set_hash_input_set <port_id> "
	"ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
	"ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
	"ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|ipv4-tos|ipv4-proto|"
	"ipv6-tc|ipv6-next-header|udp-src-port|udp-dst-port|tcp-src-port|"
	"tcp-dst-port|sctp-src-port|sctp-dst-port|sctp-veri-tag|udp-key|"
	"gre-key|fld-1st|fld-2nd|fld-3rd|fld-4th|fld-5th|fld-6th|"
	"fld-7th|fld-8th|none select|add",
	.tokens = {
		(void *)&cmd_set_hash_input_set_cmd,
		(void *)&cmd_set_hash_input_set_port_id,
		(void *)&cmd_set_hash_input_set_flow_type,
		(void *)&cmd_set_hash_input_set_field,
		(void *)&cmd_set_hash_input_set_select,
		NULL,
	},
};
+
+/* Set flow director input set */
/* Parsed result for
 * "set_fdir_input_set <port_id> <flow_type> <field> select|add". */
struct cmd_set_fdir_input_set_result {
	cmdline_fixed_string_t set_fdir_input_set; /* literal command word */
	uint8_t port_id;                   /* port to configure */
	cmdline_fixed_string_t flow_type;  /* flow type name */
	cmdline_fixed_string_t inset_field; /* input-set field name */
	cmdline_fixed_string_t select;     /* "select" (replace) or "add" */
};
+
+static void
+cmd_set_fdir_input_set_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_fdir_input_set_result *res = parsed_result;
+ struct rte_eth_fdir_filter_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = str2flowtype(res->flow_type);
+ info.info.input_set_conf.field[0] = str2inset(res->inset_field);
+ info.info.input_set_conf.inset_size = 1;
+ if (!strcmp(res->select, "select"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ else if (!strcmp(res->select, "add"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD;
+ rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_SET, &info);
+}
+
/* Tokens and command instance for "set_fdir_input_set". */
cmdline_parse_token_string_t cmd_set_fdir_input_set_cmd =
	TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result,
	set_fdir_input_set, "set_fdir_input_set");
cmdline_parse_token_num_t cmd_set_fdir_input_set_port_id =
	TOKEN_NUM_INITIALIZER(struct cmd_set_fdir_input_set_result,
	port_id, UINT8);
cmdline_parse_token_string_t cmd_set_fdir_input_set_flow_type =
	TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result,
	flow_type,
	"ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#"
	"ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload");
cmdline_parse_token_string_t cmd_set_fdir_input_set_field =
	TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result,
	inset_field,
	"ivlan#ethertype#src-ipv4#dst-ipv4#src-ipv6#dst-ipv6#"
	"ipv4-tos#ipv4-proto#ipv4-ttl#ipv6-tc#ipv6-next-header#"
	"ipv6-hop-limits#udp-src-port#udp-dst-port#"
	"tcp-src-port#tcp-dst-port#sctp-src-port#sctp-dst-port#"
	"sctp-veri-tag#none");
cmdline_parse_token_string_t cmd_set_fdir_input_set_select =
	TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result,
	select, "select#add");

cmdline_parse_inst_t cmd_set_fdir_input_set = {
	.f = cmd_set_fdir_input_set_parsed,
	.data = NULL,
	.help_str = "set_fdir_input_set <port_id> "
	"ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
	"ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
	"ivlan|ethertype|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|"
	"ipv4-tos|ipv4-proto|ipv4-ttl|ipv6-tc|ipv6-next-header|"
	"ipv6-hop-limits|udp-src-port|udp-dst-port|"
	"tcp-src-port|tcp-dst-port|sctp-src-port|sctp-dst-port|"
	"sctp-veri-tag|none select|add",
	.tokens = {
		(void *)&cmd_set_fdir_input_set_cmd,
		(void *)&cmd_set_fdir_input_set_port_id,
		(void *)&cmd_set_fdir_input_set_flow_type,
		(void *)&cmd_set_fdir_input_set_field,
		(void *)&cmd_set_fdir_input_set_select,
		NULL,
	},
};
+
+/* *** ADD/REMOVE A MULTICAST MAC ADDRESS TO/FROM A PORT *** */
/* Parsed result for "mcast_addr add|remove <port_id> <mcast_addr>". */
struct cmd_mcast_addr_result {
	cmdline_fixed_string_t mcast_addr_cmd; /* literal "mcast_addr" */
	cmdline_fixed_string_t what;           /* "add" or "remove" */
	uint8_t port_num;                      /* target port */
	struct ether_addr mc_addr;             /* multicast MAC address */
};
+
+static void cmd_mcast_addr_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_mcast_addr_result *res = parsed_result;
+
+ if (!is_multicast_ether_addr(&res->mc_addr)) {
+ printf("Invalid multicast addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ res->mc_addr.addr_bytes[0], res->mc_addr.addr_bytes[1],
+ res->mc_addr.addr_bytes[2], res->mc_addr.addr_bytes[3],
+ res->mc_addr.addr_bytes[4], res->mc_addr.addr_bytes[5]);
+ return;
+ }
+ if (strcmp(res->what, "add") == 0)
+ mcast_addr_add(res->port_num, &res->mc_addr);
+ else
+ mcast_addr_remove(res->port_num, &res->mc_addr);
+}
+
+cmdline_parse_token_string_t cmd_mcast_addr_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_mcast_addr_result,
+ mcast_addr_cmd, "mcast_addr");
+cmdline_parse_token_string_t cmd_mcast_addr_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_mcast_addr_result, what,
+ "add#remove");
+cmdline_parse_token_num_t cmd_mcast_addr_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_mcast_addr_result, port_num, UINT8);
+cmdline_parse_token_etheraddr_t cmd_mcast_addr_addr =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_addr_result, address);
+
/* Command instance for "mcast_addr add|remove <port_id> <mcast_addr>". */
cmdline_parse_inst_t cmd_mcast_addr = {
	.f = cmd_mcast_addr_parsed,
	.data = (void *)0,
	.help_str = "mcast_addr add|remove X <mcast_addr>: add/remove multicast MAC address on port X",
	.tokens = {
		(void *)&cmd_mcast_addr_cmd,
		(void *)&cmd_mcast_addr_what,
		(void *)&cmd_mcast_addr_portnum,
		(void *)&cmd_mcast_addr_addr,
		NULL,
	},
};
+
+/* l2 tunnel config
+ * only support E-tag now.
+ */
+
+/* Ether type config */
/* Parsed result for "port config [all|<id>] l2-tunnel E-tag ether-type <v>". */
struct cmd_config_l2_tunnel_eth_type_result {
	cmdline_fixed_string_t port;       /* literal "port" */
	cmdline_fixed_string_t config;     /* literal "config" */
	cmdline_fixed_string_t all;        /* literal "all" (all-ports form) */
	uint8_t id;                        /* port id (single-port form) */
	cmdline_fixed_string_t l2_tunnel;  /* literal "l2-tunnel" */
	cmdline_fixed_string_t l2_tunnel_type; /* only "E-tag" supported */
	cmdline_fixed_string_t eth_type;   /* literal "ether-type" */
	uint16_t eth_type_val;             /* new ether-type value */
};
+
/* Tokens shared by the "all ports" and "single port" forms of
 * "port config ... l2-tunnel E-tag ether-type <v>". */
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_port =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 port, "port");
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_config =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 config, "config");
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_all_str =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 all, "all");
cmdline_parse_token_num_t cmd_config_l2_tunnel_eth_type_id =
	TOKEN_NUM_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 id, UINT8);
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_l2_tunnel =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 l2_tunnel, "l2-tunnel");
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_l2_tunnel_type =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 l2_tunnel_type, "E-tag");
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_eth_type =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 eth_type, "ether-type");
cmdline_parse_token_num_t cmd_config_l2_tunnel_eth_type_eth_type_val =
	TOKEN_NUM_INITIALIZER
		(struct cmd_config_l2_tunnel_eth_type_result,
		 eth_type_val, UINT16);
+
+static enum rte_eth_tunnel_type
+str2fdir_l2_tunnel_type(char *string)
+{
+ uint32_t i = 0;
+
+ static const struct {
+ char str[32];
+ enum rte_eth_tunnel_type type;
+ } l2_tunnel_type_str[] = {
+ {"E-tag", RTE_L2_TUNNEL_TYPE_E_TAG},
+ };
+
+ for (i = 0; i < RTE_DIM(l2_tunnel_type_str); i++) {
+ if (!strcmp(l2_tunnel_type_str[i].str, string))
+ return l2_tunnel_type_str[i].type;
+ }
+ return RTE_TUNNEL_TYPE_NONE;
+}
+
+/* ether type config for all ports */
+static void
+cmd_config_l2_tunnel_eth_type_all_parsed
+ (void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_l2_tunnel_eth_type_result *res = parsed_result;
+ struct rte_eth_l2_tunnel_conf entry;
+ portid_t pid;
+
+ entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type);
+ entry.ether_type = res->eth_type_val;
+
+ FOREACH_PORT(pid, ports) {
+ rte_eth_dev_l2_tunnel_eth_type_conf(pid, &entry);
+ }
+}
+
/* Command instance: "port config all l2-tunnel E-tag ether-type <v>". */
cmdline_parse_inst_t cmd_config_l2_tunnel_eth_type_all = {
	.f = cmd_config_l2_tunnel_eth_type_all_parsed,
	.data = NULL,
	.help_str = "port config all l2-tunnel ether-type",
	.tokens = {
		(void *)&cmd_config_l2_tunnel_eth_type_port,
		(void *)&cmd_config_l2_tunnel_eth_type_config,
		(void *)&cmd_config_l2_tunnel_eth_type_all_str,
		(void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel,
		(void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel_type,
		(void *)&cmd_config_l2_tunnel_eth_type_eth_type,
		(void *)&cmd_config_l2_tunnel_eth_type_eth_type_val,
		NULL,
	},
};
+
+/* ether type config for a specific port */
+static void
+cmd_config_l2_tunnel_eth_type_specific_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_l2_tunnel_eth_type_result *res =
+ parsed_result;
+ struct rte_eth_l2_tunnel_conf entry;
+
+ if (port_id_is_invalid(res->id, ENABLED_WARN))
+ return;
+
+ entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type);
+ entry.ether_type = res->eth_type_val;
+
+ rte_eth_dev_l2_tunnel_eth_type_conf(res->id, &entry);
+}
+
/* Command instance: "port config <id> l2-tunnel E-tag ether-type <v>". */
cmdline_parse_inst_t cmd_config_l2_tunnel_eth_type_specific = {
	.f = cmd_config_l2_tunnel_eth_type_specific_parsed,
	.data = NULL,
	.help_str = "port config l2-tunnel ether-type",
	.tokens = {
		(void *)&cmd_config_l2_tunnel_eth_type_port,
		(void *)&cmd_config_l2_tunnel_eth_type_config,
		(void *)&cmd_config_l2_tunnel_eth_type_id,
		(void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel,
		(void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel_type,
		(void *)&cmd_config_l2_tunnel_eth_type_eth_type,
		(void *)&cmd_config_l2_tunnel_eth_type_eth_type_val,
		NULL,
	},
};
+
+/* Enable/disable l2 tunnel */
/* Parsed result for "port config [all|<id>] l2-tunnel E-tag enable|disable". */
struct cmd_config_l2_tunnel_en_dis_result {
	cmdline_fixed_string_t port;       /* literal "port" */
	cmdline_fixed_string_t config;     /* literal "config" */
	cmdline_fixed_string_t all;        /* literal "all" (all-ports form) */
	uint8_t id;                        /* port id (single-port form) */
	cmdline_fixed_string_t l2_tunnel;  /* literal "l2-tunnel" */
	cmdline_fixed_string_t l2_tunnel_type; /* only "E-tag" supported */
	cmdline_fixed_string_t en_dis;     /* "enable" or "disable" */
};
+
/* Tokens shared by the "all ports" and "single port" forms of
 * "port config ... l2-tunnel E-tag enable|disable". */
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_port =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_en_dis_result,
		 port, "port");
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_config =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_en_dis_result,
		 config, "config");
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_all_str =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_en_dis_result,
		 all, "all");
cmdline_parse_token_num_t cmd_config_l2_tunnel_en_dis_id =
	TOKEN_NUM_INITIALIZER
		(struct cmd_config_l2_tunnel_en_dis_result,
		 id, UINT8);
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_l2_tunnel =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_en_dis_result,
		 l2_tunnel, "l2-tunnel");
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_l2_tunnel_type =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_en_dis_result,
		 l2_tunnel_type, "E-tag");
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_en_dis =
	TOKEN_STRING_INITIALIZER
		(struct cmd_config_l2_tunnel_en_dis_result,
		 en_dis, "enable#disable");
+
+/* enable/disable l2 tunnel for all ports */
+/* Handler for "port config all l2-tunnel E-tag enable|disable". */
+static void
+cmd_config_l2_tunnel_en_dis_all_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_l2_tunnel_en_dis_result *res = parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+	portid_t pid;
+	uint8_t en;
+
+	/* Zero the whole config: only l2_tunnel_type is meaningful for
+	 * the ENABLE mask, and the remaining fields (tunnel_id, vf_id,
+	 * pool, ether_type) must not reach the driver as uninitialized
+	 * stack data.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type);
+
+	/* Token pattern is "enable#disable", so anything that is not
+	 * "enable" is "disable".
+	 */
+	en = !strcmp("enable", res->en_dis) ? 1 : 0;
+
+	FOREACH_PORT(pid, ports) {
+		rte_eth_dev_l2_tunnel_offload_set(pid,
+						  &entry,
+						  ETH_L2_TUNNEL_ENABLE_MASK,
+						  en);
+	}
+}
+
+/* cmdline instance: "port config all l2-tunnel E-tag enable|disable". */
+cmdline_parse_inst_t cmd_config_l2_tunnel_en_dis_all = {
+	.f = cmd_config_l2_tunnel_en_dis_all_parsed,
+	.data = NULL,
+	.help_str = "port config all l2-tunnel enable/disable",
+	.tokens = {
+		(void *)&cmd_config_l2_tunnel_en_dis_port,
+		(void *)&cmd_config_l2_tunnel_en_dis_config,
+		(void *)&cmd_config_l2_tunnel_en_dis_all_str,
+		(void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel,
+		(void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel_type,
+		(void *)&cmd_config_l2_tunnel_en_dis_en_dis,
+		NULL,
+	},
+};
+
+/* enable/disable l2 tunnel for a port */
+/* Handler for "port config <id> l2-tunnel E-tag enable|disable". */
+static void
+cmd_config_l2_tunnel_en_dis_specific_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_l2_tunnel_en_dis_result *res =
+		parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+	uint8_t en;
+
+	if (port_id_is_invalid(res->id, ENABLED_WARN))
+		return;
+
+	/* Zero-fill: only l2_tunnel_type is used for the ENABLE mask;
+	 * do not pass uninitialized fields to the driver.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type);
+
+	/* Single call site with a computed flag instead of two duplicated
+	 * branches; keeps this consistent with the all-ports variant.
+	 */
+	en = !strcmp("enable", res->en_dis) ? 1 : 0;
+	rte_eth_dev_l2_tunnel_offload_set(res->id,
+					  &entry,
+					  ETH_L2_TUNNEL_ENABLE_MASK,
+					  en);
+}
+
+/* cmdline instance: "port config <id> l2-tunnel E-tag enable|disable". */
+cmdline_parse_inst_t cmd_config_l2_tunnel_en_dis_specific = {
+	.f = cmd_config_l2_tunnel_en_dis_specific_parsed,
+	.data = NULL,
+	.help_str = "port config l2-tunnel enable/disable",
+	.tokens = {
+		(void *)&cmd_config_l2_tunnel_en_dis_port,
+		(void *)&cmd_config_l2_tunnel_en_dis_config,
+		(void *)&cmd_config_l2_tunnel_en_dis_id,
+		(void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel,
+		(void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel_type,
+		(void *)&cmd_config_l2_tunnel_en_dis_en_dis,
+		NULL,
+	},
+};
+
+/* E-tag configuration */
+
+/* Common result structure for all E-tag configuration */
+/* Superset of fields used by the insertion/stripping/forwarding/filter
+ * E-tag commands; each instance below selects the subset it parses.
+ */
+struct cmd_config_e_tag_result {
+	cmdline_fixed_string_t e_tag;		/* literal "E-tag" */
+	cmdline_fixed_string_t set;		/* literal "set" */
+	cmdline_fixed_string_t insertion;	/* literal "insertion" */
+	cmdline_fixed_string_t stripping;	/* literal "stripping" */
+	cmdline_fixed_string_t forwarding;	/* literal "forwarding" */
+	cmdline_fixed_string_t filter;		/* literal "filter" */
+	cmdline_fixed_string_t add;		/* literal "add" */
+	cmdline_fixed_string_t del;		/* literal "del" */
+	cmdline_fixed_string_t on;		/* literal "on" */
+	cmdline_fixed_string_t off;		/* literal "off" */
+	cmdline_fixed_string_t on_off;		/* "on" or "off" */
+	cmdline_fixed_string_t port_tag_id;	/* literal "port-tag-id" */
+	uint32_t port_tag_id_val;
+	cmdline_fixed_string_t e_tag_id;	/* literal "e-tag-id" */
+	uint16_t e_tag_id_val;			/* 14-bit id, checked by handlers */
+	cmdline_fixed_string_t dst_pool;	/* literal "dst-pool" */
+	uint8_t dst_pool_val;
+	cmdline_fixed_string_t port;		/* literal "port" */
+	uint8_t port_id;
+	cmdline_fixed_string_t vf;		/* literal "vf" */
+	uint8_t vf_id;
+};
+
+/* Common CLI fields for all E-tag configuration */
+/* One token definition per struct field above; fixed-string tokens
+ * match their literal, "on#off" accepts either alternative.
+ */
+cmdline_parse_token_string_t cmd_config_e_tag_e_tag =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 e_tag, "E-tag");
+cmdline_parse_token_string_t cmd_config_e_tag_set =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 set, "set");
+cmdline_parse_token_string_t cmd_config_e_tag_insertion =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 insertion, "insertion");
+cmdline_parse_token_string_t cmd_config_e_tag_stripping =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 stripping, "stripping");
+cmdline_parse_token_string_t cmd_config_e_tag_forwarding =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 forwarding, "forwarding");
+cmdline_parse_token_string_t cmd_config_e_tag_filter =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 filter, "filter");
+cmdline_parse_token_string_t cmd_config_e_tag_add =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 add, "add");
+cmdline_parse_token_string_t cmd_config_e_tag_del =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 del, "del");
+cmdline_parse_token_string_t cmd_config_e_tag_on =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 on, "on");
+cmdline_parse_token_string_t cmd_config_e_tag_off =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 off, "off");
+cmdline_parse_token_string_t cmd_config_e_tag_on_off =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 on_off, "on#off");
+cmdline_parse_token_string_t cmd_config_e_tag_port_tag_id =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 port_tag_id, "port-tag-id");
+cmdline_parse_token_num_t cmd_config_e_tag_port_tag_id_val =
+	TOKEN_NUM_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 port_tag_id_val, UINT32);
+cmdline_parse_token_string_t cmd_config_e_tag_e_tag_id =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 e_tag_id, "e-tag-id");
+cmdline_parse_token_num_t cmd_config_e_tag_e_tag_id_val =
+	TOKEN_NUM_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 e_tag_id_val, UINT16);
+cmdline_parse_token_string_t cmd_config_e_tag_dst_pool =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 dst_pool, "dst-pool");
+cmdline_parse_token_num_t cmd_config_e_tag_dst_pool_val =
+	TOKEN_NUM_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 dst_pool_val, UINT8);
+cmdline_parse_token_string_t cmd_config_e_tag_port =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 port, "port");
+cmdline_parse_token_num_t cmd_config_e_tag_port_id =
+	TOKEN_NUM_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 port_id, UINT8);
+cmdline_parse_token_string_t cmd_config_e_tag_vf =
+	TOKEN_STRING_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 vf, "vf");
+cmdline_parse_token_num_t cmd_config_e_tag_vf_id =
+	TOKEN_NUM_INITIALIZER
+	(struct cmd_config_e_tag_result,
+	 vf_id, UINT8);
+
+/* E-tag insertion configuration */
+/* Handler for "E-tag set insertion on ...": enable E-tag insertion
+ * for a given VF on a given port.
+ */
+static void
+cmd_config_e_tag_insertion_en_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_e_tag_result *res =
+		parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
+	/* Zero-fill so fields not set below (pool, ether_type) are
+	 * deterministic rather than uninitialized stack data.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	entry.tunnel_id = res->port_tag_id_val;
+	entry.vf_id = res->vf_id;
+	rte_eth_dev_l2_tunnel_offload_set(res->port_id,
+					  &entry,
+					  ETH_L2_TUNNEL_INSERTION_MASK,
+					  1);
+}
+
+/* Handler for "E-tag set insertion off ...": disable E-tag insertion
+ * for a given VF on a given port.
+ */
+static void
+cmd_config_e_tag_insertion_dis_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_e_tag_result *res =
+		parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
+	/* Zero-fill so fields not set below (tunnel_id, pool, ...) are
+	 * deterministic rather than uninitialized stack data.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	entry.vf_id = res->vf_id;
+
+	rte_eth_dev_l2_tunnel_offload_set(res->port_id,
+					  &entry,
+					  ETH_L2_TUNNEL_INSERTION_MASK,
+					  0);
+}
+
+/* cmdline instance: "E-tag set insertion on port-tag-id <v> port <p> vf <vf>". */
+cmdline_parse_inst_t cmd_config_e_tag_insertion_en = {
+	.f = cmd_config_e_tag_insertion_en_parsed,
+	.data = NULL,
+	.help_str = "E-tag insertion enable",
+	.tokens = {
+		(void *)&cmd_config_e_tag_e_tag,
+		(void *)&cmd_config_e_tag_set,
+		(void *)&cmd_config_e_tag_insertion,
+		(void *)&cmd_config_e_tag_on,
+		(void *)&cmd_config_e_tag_port_tag_id,
+		(void *)&cmd_config_e_tag_port_tag_id_val,
+		(void *)&cmd_config_e_tag_port,
+		(void *)&cmd_config_e_tag_port_id,
+		(void *)&cmd_config_e_tag_vf,
+		(void *)&cmd_config_e_tag_vf_id,
+		NULL,
+	},
+};
+
+/* cmdline instance: "E-tag set insertion off port <p> vf <vf>"
+ * (no port-tag-id token in the disable form).
+ */
+cmdline_parse_inst_t cmd_config_e_tag_insertion_dis = {
+	.f = cmd_config_e_tag_insertion_dis_parsed,
+	.data = NULL,
+	.help_str = "E-tag insertion disable",
+	.tokens = {
+		(void *)&cmd_config_e_tag_e_tag,
+		(void *)&cmd_config_e_tag_set,
+		(void *)&cmd_config_e_tag_insertion,
+		(void *)&cmd_config_e_tag_off,
+		(void *)&cmd_config_e_tag_port,
+		(void *)&cmd_config_e_tag_port_id,
+		(void *)&cmd_config_e_tag_vf,
+		(void *)&cmd_config_e_tag_vf_id,
+		NULL,
+	},
+};
+
+/* E-tag stripping configuration */
+/* Handler for "E-tag set stripping on|off port <p>". */
+static void
+cmd_config_e_tag_stripping_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_e_tag_result *res =
+		parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
+	/* Zero-fill: only the tunnel type matters for the STRIPPING
+	 * mask; do not pass uninitialized fields to the driver.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+
+	/* Single call with the on/off token mapped to 1/0. */
+	rte_eth_dev_l2_tunnel_offload_set
+		(res->port_id,
+		 &entry,
+		 ETH_L2_TUNNEL_STRIPPING_MASK,
+		 !strcmp(res->on_off, "on") ? 1 : 0);
+}
+
+/* cmdline instance: "E-tag set stripping on|off port <p>". */
+cmdline_parse_inst_t cmd_config_e_tag_stripping_en_dis = {
+	.f = cmd_config_e_tag_stripping_parsed,
+	.data = NULL,
+	.help_str = "E-tag stripping enable/disable",
+	.tokens = {
+		(void *)&cmd_config_e_tag_e_tag,
+		(void *)&cmd_config_e_tag_set,
+		(void *)&cmd_config_e_tag_stripping,
+		(void *)&cmd_config_e_tag_on_off,
+		(void *)&cmd_config_e_tag_port,
+		(void *)&cmd_config_e_tag_port_id,
+		NULL,
+	},
+};
+
+/* E-tag forwarding configuration */
+/* Handler for "E-tag set forwarding on|off port <p>". */
+static void
+cmd_config_e_tag_forwarding_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_e_tag_result *res = parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
+	/* Zero-fill: only the tunnel type matters for the FORWARDING
+	 * mask; do not pass uninitialized fields to the driver.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+
+	/* Single call with the on/off token mapped to 1/0. */
+	rte_eth_dev_l2_tunnel_offload_set
+		(res->port_id,
+		 &entry,
+		 ETH_L2_TUNNEL_FORWARDING_MASK,
+		 !strcmp(res->on_off, "on") ? 1 : 0);
+}
+
+/* cmdline instance: "E-tag set forwarding on|off port <p>". */
+cmdline_parse_inst_t cmd_config_e_tag_forwarding_en_dis = {
+	.f = cmd_config_e_tag_forwarding_parsed,
+	.data = NULL,
+	.help_str = "E-tag forwarding enable/disable",
+	.tokens = {
+		(void *)&cmd_config_e_tag_e_tag,
+		(void *)&cmd_config_e_tag_set,
+		(void *)&cmd_config_e_tag_forwarding,
+		(void *)&cmd_config_e_tag_on_off,
+		(void *)&cmd_config_e_tag_port,
+		(void *)&cmd_config_e_tag_port_id,
+		NULL,
+	},
+};
+
+/* E-tag filter configuration */
+/* Handler for "E-tag set filter add e-tag-id <id> dst-pool <pool> port <p>":
+ * program an E-tag filter steering the given tag id to a pool.
+ */
+static void
+cmd_config_e_tag_filter_add_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_e_tag_result *res = parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+	int ret = 0;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
+	/* E-tag id is a 14-bit field. */
+	if (res->e_tag_id_val > 0x3fff) {
+		printf("e-tag-id must be equal or less than 0x3fff.\n");
+		return;
+	}
+
+	ret = rte_eth_dev_filter_supported(res->port_id,
+					   RTE_ETH_FILTER_L2_TUNNEL);
+	if (ret < 0) {
+		printf("E-tag filter is not supported on port %u.\n",
+		       res->port_id);
+		return;
+	}
+
+	/* Zero-fill so fields not set below (e.g. vf_id) do not reach
+	 * the driver as uninitialized stack data.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	entry.tunnel_id = res->e_tag_id_val;
+	entry.pool = res->dst_pool_val;
+
+	ret = rte_eth_dev_filter_ctrl(res->port_id,
+				      RTE_ETH_FILTER_L2_TUNNEL,
+				      RTE_ETH_FILTER_ADD,
+				      &entry);
+	if (ret < 0)
+		printf("E-tag filter programming error: (%s)\n",
+		       strerror(-ret));
+}
+
+/* cmdline instance: "E-tag set filter add e-tag-id <id> dst-pool <pool> port <p>". */
+cmdline_parse_inst_t cmd_config_e_tag_filter_add = {
+	.f = cmd_config_e_tag_filter_add_parsed,
+	.data = NULL,
+	.help_str = "E-tag filter add",
+	.tokens = {
+		(void *)&cmd_config_e_tag_e_tag,
+		(void *)&cmd_config_e_tag_set,
+		(void *)&cmd_config_e_tag_filter,
+		(void *)&cmd_config_e_tag_add,
+		(void *)&cmd_config_e_tag_e_tag_id,
+		(void *)&cmd_config_e_tag_e_tag_id_val,
+		(void *)&cmd_config_e_tag_dst_pool,
+		(void *)&cmd_config_e_tag_dst_pool_val,
+		(void *)&cmd_config_e_tag_port,
+		(void *)&cmd_config_e_tag_port_id,
+		NULL,
+	},
+};
+
+/* Handler for "E-tag set filter del e-tag-id <id> port <p>":
+ * remove a previously added E-tag filter.
+ */
+static void
+cmd_config_e_tag_filter_del_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_config_e_tag_result *res = parsed_result;
+	struct rte_eth_l2_tunnel_conf entry;
+	int ret = 0;
+
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
+	/* E-tag id is a 14-bit field; the check rejects values above
+	 * 0x3fff (0x3fff itself is valid), so say "equal or less than"
+	 * — same wording as the add command.
+	 */
+	if (res->e_tag_id_val > 0x3fff) {
+		printf("e-tag-id must be equal or less than 0x3fff.\n");
+		return;
+	}
+
+	ret = rte_eth_dev_filter_supported(res->port_id,
+					   RTE_ETH_FILTER_L2_TUNNEL);
+	if (ret < 0) {
+		printf("E-tag filter is not supported on port %u.\n",
+		       res->port_id);
+		return;
+	}
+
+	/* Zero-fill so fields not set below (pool, vf_id) do not reach
+	 * the driver as uninitialized stack data.
+	 */
+	memset(&entry, 0, sizeof(entry));
+	entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+	entry.tunnel_id = res->e_tag_id_val;
+
+	ret = rte_eth_dev_filter_ctrl(res->port_id,
+				      RTE_ETH_FILTER_L2_TUNNEL,
+				      RTE_ETH_FILTER_DELETE,
+				      &entry);
+	if (ret < 0)
+		printf("E-tag filter programming error: (%s)\n",
+		       strerror(-ret));
+}
+
+/* cmdline instance: "E-tag set filter del e-tag-id <id> port <p>". */
+cmdline_parse_inst_t cmd_config_e_tag_filter_del = {
+	.f = cmd_config_e_tag_filter_del_parsed,
+	.data = NULL,
+	.help_str = "E-tag filter delete",
+	.tokens = {
+		(void *)&cmd_config_e_tag_e_tag,
+		(void *)&cmd_config_e_tag_set,
+		(void *)&cmd_config_e_tag_filter,
+		(void *)&cmd_config_e_tag_del,
+		(void *)&cmd_config_e_tag_e_tag_id,
+		(void *)&cmd_config_e_tag_e_tag_id_val,
+		(void *)&cmd_config_e_tag_port,
+		(void *)&cmd_config_e_tag_port_id,
+		NULL,
+	},
+};
+
+/* ******************************************************************************** */
+
+/* list of instructions */
+/* Master registry of every testpmd CLI command; consumed by
+ * cmdline_stdin_new() in prompt(). NULL-terminated.
+ */
+cmdline_parse_ctx_t main_ctx[] = {
+	(cmdline_parse_inst_t *)&cmd_help_brief,
+	(cmdline_parse_inst_t *)&cmd_help_long,
+	(cmdline_parse_inst_t *)&cmd_quit,
+	(cmdline_parse_inst_t *)&cmd_showport,
+	(cmdline_parse_inst_t *)&cmd_showqueue,
+	(cmdline_parse_inst_t *)&cmd_showportall,
+	(cmdline_parse_inst_t *)&cmd_showcfg,
+	(cmdline_parse_inst_t *)&cmd_start,
+	(cmdline_parse_inst_t *)&cmd_start_tx_first,
+	(cmdline_parse_inst_t *)&cmd_set_link_up,
+	(cmdline_parse_inst_t *)&cmd_set_link_down,
+	(cmdline_parse_inst_t *)&cmd_reset,
+	(cmdline_parse_inst_t *)&cmd_set_numbers,
+	(cmdline_parse_inst_t *)&cmd_set_txpkts,
+	(cmdline_parse_inst_t *)&cmd_set_txsplit,
+	(cmdline_parse_inst_t *)&cmd_set_fwd_list,
+	(cmdline_parse_inst_t *)&cmd_set_fwd_mask,
+	(cmdline_parse_inst_t *)&cmd_set_fwd_mode,
+	(cmdline_parse_inst_t *)&cmd_set_burst_tx_retry,
+	(cmdline_parse_inst_t *)&cmd_set_promisc_mode_one,
+	(cmdline_parse_inst_t *)&cmd_set_promisc_mode_all,
+	(cmdline_parse_inst_t *)&cmd_set_allmulti_mode_one,
+	(cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all,
+	(cmdline_parse_inst_t *)&cmd_set_flush_rx,
+	(cmdline_parse_inst_t *)&cmd_set_link_check,
+/* bypass commands exist only when the ixgbe bypass feature is compiled in */
+#ifdef RTE_NIC_BYPASS
+	(cmdline_parse_inst_t *)&cmd_set_bypass_mode,
+	(cmdline_parse_inst_t *)&cmd_set_bypass_event,
+	(cmdline_parse_inst_t *)&cmd_set_bypass_timeout,
+	(cmdline_parse_inst_t *)&cmd_show_bypass_config,
+#endif
+#ifdef RTE_LIBRTE_PMD_BOND
+	(cmdline_parse_inst_t *) &cmd_set_bonding_mode,
+	(cmdline_parse_inst_t *) &cmd_show_bonding_config,
+	(cmdline_parse_inst_t *) &cmd_set_bonding_primary,
+	(cmdline_parse_inst_t *) &cmd_add_bonding_slave,
+	(cmdline_parse_inst_t *) &cmd_remove_bonding_slave,
+	(cmdline_parse_inst_t *) &cmd_create_bonded_device,
+	(cmdline_parse_inst_t *) &cmd_set_bond_mac_addr,
+	(cmdline_parse_inst_t *) &cmd_set_balance_xmit_policy,
+	(cmdline_parse_inst_t *) &cmd_set_bond_mon_period,
+#endif
+	(cmdline_parse_inst_t *)&cmd_vlan_offload,
+	(cmdline_parse_inst_t *)&cmd_vlan_tpid,
+	(cmdline_parse_inst_t *)&cmd_rx_vlan_filter_all,
+	(cmdline_parse_inst_t *)&cmd_rx_vlan_filter,
+	(cmdline_parse_inst_t *)&cmd_tx_vlan_set,
+	(cmdline_parse_inst_t *)&cmd_tx_vlan_set_qinq,
+	(cmdline_parse_inst_t *)&cmd_tx_vlan_reset,
+	(cmdline_parse_inst_t *)&cmd_tx_vlan_set_pvid,
+	(cmdline_parse_inst_t *)&cmd_csum_set,
+	(cmdline_parse_inst_t *)&cmd_csum_show,
+	(cmdline_parse_inst_t *)&cmd_csum_tunnel,
+	(cmdline_parse_inst_t *)&cmd_tso_set,
+	(cmdline_parse_inst_t *)&cmd_tso_show,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_hw,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_lw,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_pt,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_xon,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_macfwd,
+	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_autoneg,
+	(cmdline_parse_inst_t *)&cmd_priority_flow_control_set,
+	(cmdline_parse_inst_t *)&cmd_config_dcb,
+	(cmdline_parse_inst_t *)&cmd_read_reg,
+	(cmdline_parse_inst_t *)&cmd_read_reg_bit_field,
+	(cmdline_parse_inst_t *)&cmd_read_reg_bit,
+	(cmdline_parse_inst_t *)&cmd_write_reg,
+	(cmdline_parse_inst_t *)&cmd_write_reg_bit_field,
+	(cmdline_parse_inst_t *)&cmd_write_reg_bit,
+	(cmdline_parse_inst_t *)&cmd_read_rxd_txd,
+	(cmdline_parse_inst_t *)&cmd_stop,
+	(cmdline_parse_inst_t *)&cmd_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_qmap,
+	(cmdline_parse_inst_t *)&cmd_operate_port,
+	(cmdline_parse_inst_t *)&cmd_operate_specific_port,
+	(cmdline_parse_inst_t *)&cmd_operate_attach_port,
+	(cmdline_parse_inst_t *)&cmd_operate_detach_port,
+	(cmdline_parse_inst_t *)&cmd_config_speed_all,
+	(cmdline_parse_inst_t *)&cmd_config_speed_specific,
+	(cmdline_parse_inst_t *)&cmd_config_rx_tx,
+	(cmdline_parse_inst_t *)&cmd_config_mtu,
+	(cmdline_parse_inst_t *)&cmd_config_max_pkt_len,
+	(cmdline_parse_inst_t *)&cmd_config_rx_mode_flag,
+	(cmdline_parse_inst_t *)&cmd_config_rss,
+	(cmdline_parse_inst_t *)&cmd_config_rxtx_queue,
+	(cmdline_parse_inst_t *)&cmd_config_rss_reta,
+	(cmdline_parse_inst_t *)&cmd_showport_reta,
+	(cmdline_parse_inst_t *)&cmd_config_burst,
+	(cmdline_parse_inst_t *)&cmd_config_thresh,
+	(cmdline_parse_inst_t *)&cmd_config_threshold,
+	(cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
+	(cmdline_parse_inst_t *)&cmd_set_uc_hash_filter,
+	(cmdline_parse_inst_t *)&cmd_set_uc_all_hash_filter,
+	(cmdline_parse_inst_t *)&cmd_vf_mac_addr_filter,
+	(cmdline_parse_inst_t *)&cmd_set_vf_macvlan_filter,
+	(cmdline_parse_inst_t *)&cmd_set_vf_traffic,
+	(cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
+	(cmdline_parse_inst_t *)&cmd_queue_rate_limit,
+	(cmdline_parse_inst_t *)&cmd_vf_rate_limit,
+	(cmdline_parse_inst_t *)&cmd_tunnel_filter,
+	(cmdline_parse_inst_t *)&cmd_tunnel_udp_config,
+	(cmdline_parse_inst_t *)&cmd_global_config,
+	(cmdline_parse_inst_t *)&cmd_set_mirror_mask,
+	(cmdline_parse_inst_t *)&cmd_set_mirror_link,
+	(cmdline_parse_inst_t *)&cmd_reset_mirror_rule,
+	(cmdline_parse_inst_t *)&cmd_showport_rss_hash,
+	(cmdline_parse_inst_t *)&cmd_showport_rss_hash_key,
+	(cmdline_parse_inst_t *)&cmd_config_rss_hash_key,
+	(cmdline_parse_inst_t *)&cmd_dump,
+	(cmdline_parse_inst_t *)&cmd_dump_one,
+	(cmdline_parse_inst_t *)&cmd_ethertype_filter,
+	(cmdline_parse_inst_t *)&cmd_syn_filter,
+	(cmdline_parse_inst_t *)&cmd_2tuple_filter,
+	(cmdline_parse_inst_t *)&cmd_5tuple_filter,
+	(cmdline_parse_inst_t *)&cmd_flex_filter,
+	(cmdline_parse_inst_t *)&cmd_add_del_ip_flow_director,
+	(cmdline_parse_inst_t *)&cmd_add_del_udp_flow_director,
+	(cmdline_parse_inst_t *)&cmd_add_del_sctp_flow_director,
+	(cmdline_parse_inst_t *)&cmd_add_del_l2_flow_director,
+	(cmdline_parse_inst_t *)&cmd_add_del_mac_vlan_flow_director,
+	(cmdline_parse_inst_t *)&cmd_add_del_tunnel_flow_director,
+	(cmdline_parse_inst_t *)&cmd_flush_flow_director,
+	(cmdline_parse_inst_t *)&cmd_set_flow_director_ip_mask,
+	(cmdline_parse_inst_t *)&cmd_set_flow_director_mac_vlan_mask,
+	(cmdline_parse_inst_t *)&cmd_set_flow_director_tunnel_mask,
+	(cmdline_parse_inst_t *)&cmd_set_flow_director_flex_mask,
+	(cmdline_parse_inst_t *)&cmd_set_flow_director_flex_payload,
+	(cmdline_parse_inst_t *)&cmd_get_sym_hash_ena_per_port,
+	(cmdline_parse_inst_t *)&cmd_set_sym_hash_ena_per_port,
+	(cmdline_parse_inst_t *)&cmd_get_hash_global_config,
+	(cmdline_parse_inst_t *)&cmd_set_hash_global_config,
+	(cmdline_parse_inst_t *)&cmd_set_hash_input_set,
+	(cmdline_parse_inst_t *)&cmd_set_fdir_input_set,
+	(cmdline_parse_inst_t *)&cmd_mcast_addr,
+	(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_all,
+	(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_specific,
+	(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_en_dis_all,
+	(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_en_dis_specific,
+	(cmdline_parse_inst_t *)&cmd_config_e_tag_insertion_en,
+	(cmdline_parse_inst_t *)&cmd_config_e_tag_insertion_dis,
+	(cmdline_parse_inst_t *)&cmd_config_e_tag_stripping_en_dis,
+	(cmdline_parse_inst_t *)&cmd_config_e_tag_forwarding_en_dis,
+	(cmdline_parse_inst_t *)&cmd_config_e_tag_filter_add,
+	(cmdline_parse_inst_t *)&cmd_config_e_tag_filter_del,
+	NULL,
+};
+
+/* prompt function, called from main on MASTER lcore */
+/* Build the interactive cmdline on stdin and run it until quit;
+ * returns silently if the cmdline cannot be created.
+ */
+void
+prompt(void)
+{
+	/* initialize non-constant commands */
+	cmd_set_fwd_mode_init();
+
+	testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> ");
+	if (testpmd_cl != NULL) {
+		cmdline_interact(testpmd_cl);
+		cmdline_stdin_exit(testpmd_cl);
+	}
+}
+
+/* Ask the interactive cmdline (if one was created) to terminate. */
+void
+prompt_exit(void)
+{
+	if (testpmd_cl == NULL)
+		return;
+	cmdline_quit(testpmd_cl);
+}
+
+/* Flag one port (or every port when id == RTE_PORT_ALL) for device
+ * and/or queue reconfiguration; flags already set are left alone.
+ */
+static void
+cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue)
+{
+	portid_t pid;
+
+	if (id != (portid_t)RTE_PORT_ALL) {
+		if (port_id_is_invalid(id, DISABLED_WARN))
+			return;
+		if (ports[id].need_reconfig == 0)
+			ports[id].need_reconfig = dev;
+		if (ports[id].need_reconfig_queues == 0)
+			ports[id].need_reconfig_queues = queue;
+		return;
+	}
+
+	FOREACH_PORT(pid, ports) {
+		if (ports[pid].need_reconfig == 0)
+			ports[pid].need_reconfig = dev;
+		if (ports[pid].need_reconfig_queues == 0)
+			ports[pid].need_reconfig_queues = queue;
+	}
+}
+
+#ifdef RTE_NIC_BYPASS
+#include <rte_pci_dev_ids.h>
+/* Return 1 when the port's NIC supports the bypass feature
+ * (82599 bypass device), 0 otherwise (with a diagnostic message).
+ */
+uint8_t
+bypass_is_supported(portid_t port_id)
+{
+	struct rte_port *port;
+	struct rte_pci_id *pci_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return 0;
+
+	/* Look up the PCI device id of this port. */
+	port = &ports[port_id];
+	pci_id = &port->dev_info.pci_dev->id;
+
+	if (pci_id->device_id != IXGBE_DEV_ID_82599_BYPASS) {
+		printf("\tBypass not supported for port_id = %d.\n", port_id);
+		return 0;
+	}
+
+	return 1;
+}
+#endif
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
new file mode 100644
index 00000000..b1bbec6d
--- /dev/null
+++ b/app/test-pmd/config.c
@@ -0,0 +1,2481 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright 2013-2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+static char *flowtype_to_str(uint16_t flow_type);
+
+/* Maps each TX packet-split mode to its user-visible name
+ * ("off", "on", "rand").
+ */
+static const struct {
+	enum tx_pkt_split split;
+	const char *name;
+} tx_split_name[] = {
+	{
+		.split = TX_PKT_SPLIT_OFF,
+		.name = "off",
+	},
+	{
+		.split = TX_PKT_SPLIT_ON,
+		.name = "on",
+	},
+	{
+		.split = TX_PKT_SPLIT_RND,
+		.name = "rand",
+	},
+};
+
+/* Pairs a user-visible RSS flow-type name with its ETH_RSS_* bit. */
+struct rss_type_info {
+	char str[32];
+	uint64_t rss_type;
+};
+
+/* Lookup table of every RSS flow type testpmd can name on the CLI. */
+static const struct rss_type_info rss_type_table[] = {
+	{ "ipv4", ETH_RSS_IPV4 },
+	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
+	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
+	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
+	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
+	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
+	{ "ipv6", ETH_RSS_IPV6 },
+	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
+	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
+	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
+	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
+	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
+	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
+	{ "ipv6-ex", ETH_RSS_IPV6_EX },
+	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
+	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
+};
+
+/* Print "name" immediately followed by the formatted MAC address. */
+static void
+print_ethaddr(const char *name, struct ether_addr *eth_addr)
+{
+	char buf[ETHER_ADDR_FMT_SIZE];
+
+	ether_format_addr(buf, sizeof(buf), eth_addr);
+	printf("%s%s", name, buf);
+}
+
+/* Display the basic RX/TX counters of a port; when queue-statistics
+ * register mapping is enabled, also dump the per-register counters.
+ */
+void
+nic_stats_display(portid_t port_id)
+{
+	struct rte_eth_stats stats;
+	struct rte_port *port = &ports[port_id];
+	uint8_t i;
+	portid_t pid;
+
+	static const char *nic_stats_border = "########################";
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+		printf("Valid port range is [0");
+		FOREACH_PORT(pid, ports)
+			printf(", %d", pid);
+		printf("]\n");
+		return;
+	}
+	rte_eth_stats_get(port_id, &stats);
+	printf("\n  %s NIC statistics for port %-2d %s\n",
+	       nic_stats_border, port_id, nic_stats_border);
+
+	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
+		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
+		       "%-"PRIu64"\n",
+		       stats.ipackets, stats.imissed, stats.ibytes);
+		printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
+		printf("  RX-nombuf:  %-10"PRIu64"\n",
+		       stats.rx_nombuf);
+		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
+		       "%-"PRIu64"\n",
+		       stats.opackets, stats.oerrors, stats.obytes);
+	}
+	else {
+		/* NOTE(review): this branch prints stats.ierrors both in the
+		 * RX-packets line and on the dedicated RX-errors line below,
+		 * where the branch above prints imissed — looks copy-pasted;
+		 * confirm the intended output.
+		 */
+		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
+		       "    RX-bytes: %10"PRIu64"\n",
+		       stats.ipackets, stats.ierrors, stats.ibytes);
+		printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
+		printf("  RX-nombuf:               %10"PRIu64"\n",
+		       stats.rx_nombuf);
+		printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
+		       "    TX-bytes: %10"PRIu64"\n",
+		       stats.opackets, stats.oerrors, stats.obytes);
+	}
+
+	if (port->rx_queue_stats_mapping_enabled) {
+		printf("\n");
+		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+			printf("  Stats reg %2d RX-packets: %10"PRIu64
+			       "    RX-errors: %10"PRIu64
+			       "    RX-bytes: %10"PRIu64"\n",
+			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
+		}
+	}
+	if (port->tx_queue_stats_mapping_enabled) {
+		printf("\n");
+		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+			printf("  Stats reg %2d TX-packets: %10"PRIu64
+			       "                             TX-bytes: %10"PRIu64"\n",
+			       i, stats.q_opackets[i], stats.q_obytes[i]);
+		}
+	}
+
+	printf("  %s############################%s\n",
+	       nic_stats_border, nic_stats_border);
+}
+
+/* Reset the basic statistics of a port; on a bad port id, list the
+ * valid port range instead.
+ */
+void
+nic_stats_clear(portid_t port_id)
+{
+	portid_t pid;
+
+	if (!port_id_is_invalid(port_id, ENABLED_WARN)) {
+		rte_eth_stats_reset(port_id);
+		printf("\n  NIC statistics for port %d cleared\n", port_id);
+		return;
+	}
+
+	printf("Valid port range is [0");
+	FOREACH_PORT(pid, ports)
+		printf(", %d", pid);
+	printf("]\n");
+}
+
+/* Display the extended (driver-specific) statistics of a port. */
+void
+nic_xstats_display(portid_t port_id)
+{
+	struct rte_eth_xstats *xstats;
+	int len, ret, i;
+
+	printf("###### NIC extended statistics for port %-2d\n", port_id);
+
+	/* First call with a NULL buffer just returns the entry count. */
+	len = rte_eth_xstats_get(port_id, NULL, 0);
+	if (len < 0) {
+		printf("Cannot get xstats count\n");
+		return;
+	}
+	/* Nothing to display; also avoids malloc(0), which may legally
+	 * return NULL and would be misreported as an allocation failure.
+	 */
+	if (len == 0)
+		return;
+	xstats = malloc(sizeof(xstats[0]) * len);
+	if (xstats == NULL) {
+		printf("Cannot allocate memory for xstats\n");
+		return;
+	}
+	ret = rte_eth_xstats_get(port_id, xstats, len);
+	if (ret < 0 || ret > len) {
+		printf("Cannot get xstats\n");
+		free(xstats);
+		return;
+	}
+	/* Only 'ret' entries were filled in; iterating to 'len' would
+	 * print uninitialized memory when ret < len.
+	 */
+	for (i = 0; i < ret; i++)
+		printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
+	free(xstats);
+}
+
+/* Reset the extended statistics of the given port. */
+void
+nic_xstats_clear(portid_t port_id)
+{
+	rte_eth_xstats_reset(port_id);
+}
+
+/* Show which RX/TX queues of a port are mapped to which statistics
+ * registers; bails out if the port has no mapping enabled.
+ */
+void
+nic_stats_mapping_display(portid_t port_id)
+{
+	struct rte_port *port = &ports[port_id];
+	uint16_t i;
+	portid_t pid;
+
+	static const char *nic_stats_mapping_border = "########################";
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+		printf("Valid port range is [0");
+		FOREACH_PORT(pid, ports)
+			printf(", %d", pid);
+		printf("]\n");
+		return;
+	}
+
+	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
+		printf("Port id %d - either does not support queue statistic mapping or"
+		       " no queue statistic mapping set\n", port_id);
+		return;
+	}
+
+	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
+	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);
+
+	/* The global mapping tables cover all ports; filter by port_id. */
+	if (port->rx_queue_stats_mapping_enabled) {
+		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
+			if (rx_queue_stats_mappings[i].port_id == port_id) {
+				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
+				       rx_queue_stats_mappings[i].queue_id,
+				       rx_queue_stats_mappings[i].stats_counter_id);
+			}
+		}
+		printf("\n");
+	}
+
+
+	if (port->tx_queue_stats_mapping_enabled) {
+		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
+			if (tx_queue_stats_mappings[i].port_id == port_id) {
+				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
+				       tx_queue_stats_mappings[i].queue_id,
+				       tx_queue_stats_mappings[i].stats_counter_id);
+			}
+		}
+	}
+
+	printf("  %s####################################%s\n",
+	       nic_stats_mapping_border, nic_stats_mapping_border);
+}
+
+void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_rxq_info qinfo;
+ int32_t rc;
+ static const char *info_border = "*********************";
+
+ rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+ if (rc != 0) {
+ printf("Failed to retrieve information for port: %hhu, "
+ "RX queue: %hu\nerror desc: %s(%d)\n",
+ port_id, queue_id, strerror(-rc), rc);
+ return;
+ }
+
+ printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+ info_border, port_id, queue_id, info_border);
+
+ printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+ printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+ printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+ printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+ printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+ printf("\nRX drop packets: %s",
+ (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+ printf("\nRX deferred start: %s",
+ (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+ printf("\nRX scattered packets: %s",
+ (qinfo.scattered_rx != 0) ? "on" : "off");
+ printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+ printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_txq_info qinfo;
+ int32_t rc;
+ static const char *info_border = "*********************";
+
+ rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
+ if (rc != 0) {
+ printf("Failed to retrieve information for port: %hhu, "
+ "TX queue: %hu\nerror desc: %s(%d)\n",
+ port_id, queue_id, strerror(-rc), rc);
+ return;
+ }
+
+ printf("\n%s Infos for port %-2u, TX queue %-2u %s",
+ info_border, port_id, queue_id, info_border);
+
+ printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
+ printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
+ printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
+ printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
+ printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
+ printf("\nTX flags: %#x", qinfo.conf.txq_flags);
+ printf("\nTX deferred start: %s",
+ (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
+ printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
+ printf("\n");
+}
+
/*
 * Print a human-readable summary of one port: MAC address, NUMA socket,
 * link status/speed/duplex, promiscuous/allmulticast state, MAC address
 * limits, VLAN offload state, RSS capabilities and the RX/TX descriptor
 * limits reported by rte_eth_dev_info_get().
 */
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";
	portid_t pid;

	/* Unknown port: list the valid ids and bail out. */
	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link); /* non-blocking link query */
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nConnect to socket: %u", port->socket_id);

	/* Report the NUMA socket the port's mbuf pool was allocated on. */
	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u",port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	/* A negative return means the VLAN offload query is unsupported. */
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0){
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf(" strip on \n");
		else
			printf(" strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf(" filter on \n");
		else
			printf(" filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf(" qinq(extend) on \n");
		else
			printf(" qinq(extend) off \n");
	}

	/* Query a fresh copy of the device info for capability reporting. */
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		/* One RSS offload bit per flow type; print the named ones. */
		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
		     i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			printf(" %s\n", (p ? p : "unknown"));
		}
	}

	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}
+
+int
+port_id_is_invalid(portid_t port_id, enum print_warning warning)
+{
+ if (port_id == (portid_t)RTE_PORT_ALL)
+ return 0;
+
+ if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled)
+ return 0;
+
+ if (warning == ENABLED_WARN)
+ printf("Invalid port %d\n", port_id);
+
+ return 1;
+}
+
/* Return 1 (with a message) for VLAN ids outside [0, 4095], else 0. */
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id >= 4096) {
		printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
		return 1;
	}
	return 0;
}
+
+static int
+port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
+{
+ uint64_t pci_len;
+
+ if (reg_off & 0x3) {
+ printf("Port register offset 0x%X not aligned on a 4-byte "
+ "boundary\n",
+ (unsigned)reg_off);
+ return 1;
+ }
+ pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
+ if (reg_off >= pci_len) {
+ printf("Port %d: register offset %u (0x%X) out of port PCI "
+ "resource (length=%"PRIu64")\n",
+ port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
+ return 1;
+ }
+ return 0;
+}
+
/* Return 1 (with a message) for bit positions above 31, else 0. */
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos > 31) {
		printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
		return 1;
	}
	return 0;
}
+
/* Print "port X PCI register at offset 0xY: " without a newline. */
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

/* Print a register value in both hexadecimal and decimal form. */
static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}
+
+void
+port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
+{
+ uint32_t reg_v;
+
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (port_reg_off_is_invalid(port_id, reg_off))
+ return;
+ if (reg_bit_pos_is_invalid(bit_x))
+ return;
+ reg_v = port_id_pci_reg_read(port_id, reg_off);
+ display_port_and_reg_off(port_id, (unsigned)reg_off);
+ printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
+}
+
+void
+port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
+ uint8_t bit1_pos, uint8_t bit2_pos)
+{
+ uint32_t reg_v;
+ uint8_t l_bit;
+ uint8_t h_bit;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (port_reg_off_is_invalid(port_id, reg_off))
+ return;
+ if (reg_bit_pos_is_invalid(bit1_pos))
+ return;
+ if (reg_bit_pos_is_invalid(bit2_pos))
+ return;
+ if (bit1_pos > bit2_pos)
+ l_bit = bit2_pos, h_bit = bit1_pos;
+ else
+ l_bit = bit1_pos, h_bit = bit2_pos;
+
+ reg_v = port_id_pci_reg_read(port_id, reg_off);
+ reg_v >>= l_bit;
+ if (h_bit < 31)
+ reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
+ display_port_and_reg_off(port_id, (unsigned)reg_off);
+ printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
+ ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
+}
+
+void
+port_reg_display(portid_t port_id, uint32_t reg_off)
+{
+ uint32_t reg_v;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (port_reg_off_is_invalid(port_id, reg_off))
+ return;
+ reg_v = port_id_pci_reg_read(port_id, reg_off);
+ display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+void
+port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
+ uint8_t bit_v)
+{
+ uint32_t reg_v;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (port_reg_off_is_invalid(port_id, reg_off))
+ return;
+ if (reg_bit_pos_is_invalid(bit_pos))
+ return;
+ if (bit_v > 1) {
+ printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
+ return;
+ }
+ reg_v = port_id_pci_reg_read(port_id, reg_off);
+ if (bit_v == 0)
+ reg_v &= ~(1 << bit_pos);
+ else
+ reg_v |= (1 << bit_pos);
+ port_id_pci_reg_write(port_id, reg_off, reg_v);
+ display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+void
+port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
+ uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
+{
+ uint32_t max_v;
+ uint32_t reg_v;
+ uint8_t l_bit;
+ uint8_t h_bit;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (port_reg_off_is_invalid(port_id, reg_off))
+ return;
+ if (reg_bit_pos_is_invalid(bit1_pos))
+ return;
+ if (reg_bit_pos_is_invalid(bit2_pos))
+ return;
+ if (bit1_pos > bit2_pos)
+ l_bit = bit2_pos, h_bit = bit1_pos;
+ else
+ l_bit = bit1_pos, h_bit = bit2_pos;
+
+ if ((h_bit - l_bit) < 31)
+ max_v = (1 << (h_bit - l_bit + 1)) - 1;
+ else
+ max_v = 0xFFFFFFFF;
+
+ if (value > max_v) {
+ printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
+ (unsigned)value, (unsigned)value,
+ (unsigned)max_v, (unsigned)max_v);
+ return;
+ }
+ reg_v = port_id_pci_reg_read(port_id, reg_off);
+ reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
+ reg_v |= (value << l_bit); /* Set changed bits */
+ port_id_pci_reg_write(port_id, reg_off, reg_v);
+ display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+void
+port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
+{
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (port_reg_off_is_invalid(port_id, reg_off))
+ return;
+ port_id_pci_reg_write(port_id, reg_off, reg_v);
+ display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+void
+port_mtu_set(portid_t port_id, uint16_t mtu)
+{
+ int diag;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ diag = rte_eth_dev_set_mtu(port_id, mtu);
+ if (diag == 0)
+ return;
+ printf("Set MTU failed. diag=%d\n", diag);
+}
+
+/*
+ * RX/TX ring descriptors display functions.
+ */
+int
+rx_queue_id_is_invalid(queueid_t rxq_id)
+{
+ if (rxq_id < nb_rxq)
+ return 0;
+ printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
+ return 1;
+}
+
+int
+tx_queue_id_is_invalid(queueid_t txq_id)
+{
+ if (txq_id < nb_txq)
+ return 0;
+ printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
+ return 1;
+}
+
+static int
+rx_desc_id_is_invalid(uint16_t rxdesc_id)
+{
+ if (rxdesc_id < nb_rxd)
+ return 0;
+ printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
+ rxdesc_id, nb_rxd);
+ return 1;
+}
+
+static int
+tx_desc_id_is_invalid(uint16_t txdesc_id)
+{
+ if (txdesc_id < nb_txd)
+ return 0;
+ printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
+ txdesc_id, nb_txd);
+ return 1;
+}
+
+static const struct rte_memzone *
+ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
+ ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ printf("%s ring memory zoneof (port %d, queue %d) not"
+ "found (zone name = %s\n",
+ ring_name, port_id, q_id, mz_name);
+ return mz;
+}
+
/*
 * A 64-bit descriptor dword that can also be viewed as two 32-bit words
 * for display. The declaration order below controls which half prints
 * as "lo" and which as "hi" on each byte order.
 * NOTE(review): the little-endian branch declares 'hi' before 'lo',
 * the opposite of the natural in-memory order; this matches the
 * original upstream layout — confirm intent before changing.
 */
union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

/* Raw view of a 32-byte ring descriptor (used by i40e RX). */
struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

/* Raw view of a 16-byte ring descriptor (default RX/TX layout). */
struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};
+
/* Print one descriptor dword as two 32-bit hex words ("lo - hi"). */
static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}
+
/*
 * Dump one RX descriptor of a ring memzone as raw 32-bit words.
 *
 * i40e devices use 32-byte RX descriptors (unless the build forces the
 * 16-byte layout), so the driver name is checked at run time and the
 * larger layout is printed for them; every other device gets the
 * 16-byte layout.
 *
 * NOTE(review): the dwords are byte-swapped *in place* in the ring, so
 * displaying a descriptor mutates it — presumably only acceptable on a
 * stopped/diagnostic ring; confirm before reusing elsewhere.
 */
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}
+
+static void
+ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
+{
+ struct igb_ring_desc_16_bytes *ring;
+ struct igb_ring_desc_16_bytes txd;
+
+ ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
+ txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
+ txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
+ printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
+ (unsigned)txd.lo_dword.words.lo,
+ (unsigned)txd.lo_dword.words.hi,
+ (unsigned)txd.hi_dword.words.lo,
+ (unsigned)txd.hi_dword.words.hi);
+}
+
+void
+rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
+{
+ const struct rte_memzone *rx_mz;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (rx_queue_id_is_invalid(rxq_id))
+ return;
+ if (rx_desc_id_is_invalid(rxd_id))
+ return;
+ rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
+ if (rx_mz == NULL)
+ return;
+ ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
+}
+
+void
+tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
+{
+ const struct rte_memzone *tx_mz;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+ if (tx_queue_id_is_invalid(txq_id))
+ return;
+ if (tx_desc_id_is_invalid(txd_id))
+ return;
+ tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
+ if (tx_mz == NULL)
+ return;
+ ring_tx_descriptor_display(tx_mz, txd_id);
+}
+
+void
+fwd_lcores_config_display(void)
+{
+ lcoreid_t lc_id;
+
+ printf("List of forwarding lcores:");
+ for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
+ printf(" %2u", fwd_lcores_cpuids[lc_id]);
+ printf("\n");
+}
/*
 * Print the current RX/TX configuration: forwarding engine, CRC strip
 * mode, burst size, core/port counts and the RX/TX queue thresholds.
 * NOTE(review): the thresholds are read from ports[0] only and assumed
 * representative of every port — confirm for per-port configurations.
 */
void
rxtx_config_display(void)
{
	printf("  %s packet forwarding - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
	       (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
	struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
	       rx_conf->rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_conf->tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
	       tx_conf->tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_conf->tx_rs_thresh, tx_conf->txq_flags);
}
+
+void
+port_rss_reta_info(portid_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t nb_entries)
+{
+ uint16_t i, idx, shift;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
+ if (ret != 0) {
+ printf("Failed to get RSS RETA info, return code = %d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < nb_entries; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (!(reta_conf[idx].mask & (1ULL << shift)))
+ continue;
+ printf("RSS RETA configuration: hash index=%u, queue=%u\n",
+ i, reta_conf[idx].reta[shift]);
+ }
+}
+
/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 *
 * rss_info names the hash type to query (matched against rss_type_table);
 * when show_rss_key is non-zero the key is also fetched and dumped in hex.
 * NOTE(review): the key buffer is fixed at 10*4 = 40 bytes; devices with
 * longer RSS keys would be truncated — confirm against the drivers in use.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf;
	uint8_t rss_key[10 * 4] = "";
	uint64_t rss_hf;
	uint8_t i;
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	/* Translate the textual hash type into an rss_hf bit mask. */
	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_info, rss_type_table[i].str))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = sizeof(rss_key);
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	/* One bit per hash function; print the name of each enabled one. */
	printf("RSS functions:\n ");
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < sizeof(rss_key); i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}
+
/*
 * Update the RSS hash key (and hash-function selection) of a port.
 *
 * rss_type names the hash functions (matched against rss_type_table);
 * hash_key/hash_key_len carry the new key. The current configuration is
 * queried first and the update is only attempted if that succeeds.
 * NOTE(review): the 'uint' parameter type is POSIX, not ISO C; the
 * prototype elsewhere must use the same spelling, so it is left as-is.
 */
void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
			 uint hash_key_len)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;
	unsigned int i;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = hash_key_len;
	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}
	/* Query first; only push the new key if the device answered. */
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	/* Map the ethdev diagnostic to a user-facing message. */
	switch (diag) {
	case -ENODEV:
		printf("port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		printf("operation not supported by device\n");
		break;
	default:
		printf("operation failed - diag=%d\n", diag);
		break;
	}
}
+
/*
 * Setup forwarding configuration for each logical core.
 *
 * Distributes cfg->nb_fwd_streams over cfg->nb_fwd_lcores as evenly as
 * possible: the first (nb_fc - nb_extra) lcores get nb_fs_per_lcore
 * streams each, and the remaining nb_extra lcores get one extra stream.
 * Each lcore's stream_idx/stream_nb then describe a contiguous slice
 * of the global fwd_streams[] array.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		/* Fewer streams than cores: one stream per (used) core. */
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		/* Base share per core plus a remainder to spread. */
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	/* First nb_lc cores receive the base share of streams. */
	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}
+
/*
 * Build the forwarding configuration for the simple (one stream per
 * port, queue 0 only) test. In PAIRED topology ports are wired in
 * pairs (i <-> i+1); CHAINED wires i -> i+1 circularly; LOOP sends
 * each port's traffic back out the same port.
 */
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;

	if (port_topology == PORT_TOPOLOGY_CHAINED ||
	    port_topology == PORT_TOPOLOGY_LOOP) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		/* PAIRED topology needs an even port count to form pairs. */
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	/* PAIRED advances two ports per iteration and fills both stream
	 * directions; CHAINED/LOOP advance one port at a time. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		if (port_topology != PORT_TOPOLOGY_LOOP)
			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		else
			j = i;
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = j;

		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = i;
		}
	}
}
+
/**
 * For the RSS forwarding test, each core is assigned on every port a transmit
 * queue whose index is the index of the core itself. This approach limits the
 * maximum number of processing cores of the RSS test to the maximum number of
 * TX queues supported by the devices.
 *
 * Each core is assigned a single stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 */
static void
rss_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;

	/* Use min(nb_rxq, nb_txq) queues per port. */
	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
	/* Clamp so streams and lcores match one-to-one. */
	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_streams =
			(streamid_t)cur_fwd_config.nb_fwd_lcores;
	else
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[lc_id];

		/* Pair even/odd ports: 0<->1, 2<->3, ... */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		/*
		 * if we are in loopback, simply send stuff out through the
		 * ingress port
		 */
		if (port_topology == PORT_TOPOLOGY_LOOP)
			txp = rxp;

		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * rxq == nb_q
		 * Restart from RX queue 0 on next RX port
		 */
		rxq = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
	}
}
+
/**
 * For the DCB forwarding test, each core is assigned on each traffic class.
 *
 * Each core is assigned a multi-stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets. All RX and
 * TX queues are mapping to the same traffic class.
 * If VMDQ and DCB co-exist, each traffic class on different POOLs share
 * the same core
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t  lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint8_t tc = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	txp = 1;
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	/* One lcore per (traffic class, port) step; each lcore owns all
	 * the streams of one TC across the VMDQ pools. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if the nb_queue is zero, means this tc is
			 * not enabled on the POOL
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			/* NOTE(review): nb_rx_queue is read from the TX
			 * port's dcb_info (txp_dcb_info.tc_rxq) — looks
			 * like a copy/paste slip for rxp_dcb_info; confirm
			 * against upstream before relying on it. */
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp++;
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}
+
/*
 * Build the forwarding configuration for the icmpecho engine: one
 * stream per (port, RX queue), with TX going back out the RX port.
 * The number of forwarding lcores is capped by the stream count.
 */
static void
icmp_echo_config_setup(void)
{
	portid_t  rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t  sm_id;

	/* Never use more lcores than there are (port, txq) combinations. */
	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	/* Walk each lcore's stream slice; echo traffic back on the RX port. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf("  core=%d: \n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			if (verbose_level > 0)
				printf("  stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}
+
+void
+fwd_config_setup(void)
+{
+ cur_fwd_config.fwd_eng = cur_fwd_eng;
+ if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
+ icmp_echo_config_setup();
+ return;
+ }
+ if ((nb_rxq > 1) && (nb_txq > 1)){
+ if (dcb_config)
+ dcb_fwd_config_setup();
+ else
+ rss_fwd_config_setup();
+ }
+ else
+ simple_fwd_config_setup();
+}
+
/*
 * Pretty-print a forwarding configuration: engine name, global counts,
 * and for every lcore the list of its streams (RX port/queue -> TX
 * port/queue plus the peer MAC address used for transmission).
 */
static void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP over anonymous pages %s\n",
		cfg->fwd_eng->fwd_mode_name,
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_anon != 0 ? "enabled" : "disabled");

	/* mac_retry is the only engine with TX retry parameters. */
	if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}
+
+
/* Rebuild the current forwarding configuration, then pretty-print it. */
void
fwd_config_display(void)
{
	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
}
+
+/*
+ * Validate and record the list of lcores used for packet forwarding.
+ * All entries are checked before any are recorded, so the existing
+ * configuration is left untouched when an invalid lcore is supplied.
+ * Returns 0 on success, -1 on a disabled lcore or the master lcore.
+ */
+int
+set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
+{
+	unsigned int idx;
+	unsigned int cpuid;
+
+	/* First pass: validation only, record nothing. */
+	for (idx = 0; idx < nb_lc; idx++) {
+		cpuid = lcorelist[idx];
+		if (!rte_lcore_is_enabled(cpuid)) {
+			printf("lcore %u not enabled\n", cpuid);
+			return -1;
+		}
+		if (cpuid == rte_get_master_lcore()) {
+			printf("lcore %u cannot be masked on for running "
+			       "packet forwarding, which is the master lcore "
+			       "and reserved for command line parsing only\n",
+			       cpuid);
+			return -1;
+		}
+	}
+
+	/* Second pass: record the now-validated lcores. */
+	for (idx = 0; idx < nb_lc; idx++)
+		fwd_lcores_cpuids[idx] = lcorelist[idx];
+
+	nb_cfg_lcores = (lcoreid_t) nb_lc;
+	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
+		printf("previous number of forwarding cores %u - changed to "
+		       "number of configured cores %u\n",
+		       (unsigned int) nb_fwd_lcores, nb_lc);
+		nb_fwd_lcores = (lcoreid_t) nb_lc;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert a 64-bit lcore bitmask into an explicit lcore list and
+ * apply it via set_fwd_lcores_list(). Returns its result, or -1 for
+ * an empty mask.
+ */
+int
+set_fwd_lcores_mask(uint64_t lcoremask)
+{
+	unsigned int cpuids[64];
+	unsigned int count = 0;
+	unsigned int bit;
+
+	if (lcoremask == 0) {
+		printf("Invalid NULL mask of cores\n");
+		return -1;
+	}
+	for (bit = 0; bit < 64; bit++) {
+		if ((lcoremask >> bit) & 1)
+			cpuids[count++] = bit;
+	}
+	return set_fwd_lcores_list(cpuids, count);
+}
+
+/*
+ * Set how many of the configured lcores are actually used for
+ * forwarding; must not exceed the number previously configured with
+ * set_fwd_lcores_list()/mask().
+ */
+void
+set_fwd_lcores_number(uint16_t nb_lc)
+{
+	if (nb_lc > nb_cfg_lcores) {
+		printf("nb fwd cores %u > %u (max. number of configured "
+		       "lcores) - ignored\n",
+		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
+		return;
+	}
+	nb_fwd_lcores = (lcoreid_t) nb_lc;
+	printf("Number of forwarding cores set to %u\n",
+	       (unsigned int) nb_fwd_lcores);
+}
+
+/*
+ * Validate and record the list of ports used for packet forwarding.
+ * Every id is validated before any is recorded, so the configuration
+ * is left unchanged when an invalid port id is supplied.
+ */
+void
+set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
+{
+	unsigned int idx;
+
+	/* First pass: validate every port id; bail out on the first bad one. */
+	for (idx = 0; idx < nb_pt; idx++) {
+		if (port_id_is_invalid((portid_t) portlist[idx], ENABLED_WARN))
+			return;
+	}
+
+	/* Second pass: record the validated ids. */
+	for (idx = 0; idx < nb_pt; idx++)
+		fwd_ports_ids[idx] = (portid_t) portlist[idx];
+
+	nb_cfg_ports = (portid_t) nb_pt;
+	if (nb_fwd_ports != (portid_t) nb_pt) {
+		printf("previous number of forwarding ports %u - changed to "
+		       "number of configured ports %u\n",
+		       (unsigned int) nb_fwd_ports, nb_pt);
+		nb_fwd_ports = (portid_t) nb_pt;
+	}
+}
+
+/*
+ * Convert a 64-bit port bitmask into an explicit port list and apply
+ * it via set_fwd_ports_list(). An empty mask is rejected.
+ */
+void
+set_fwd_ports_mask(uint64_t portmask)
+{
+	unsigned int ids[64];
+	unsigned int count = 0;
+	unsigned int bit;
+
+	if (portmask == 0) {
+		printf("Invalid NULL mask of ports\n");
+		return;
+	}
+	/* Only bits below RTE_MAX_ETHPORTS can name a valid port. */
+	for (bit = 0; bit < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); bit++) {
+		if ((portmask >> bit) & 1)
+			ids[count++] = bit;
+	}
+	set_fwd_ports_list(ids, count);
+}
+
+/*
+ * Set how many of the configured ports are actually used for
+ * forwarding; must not exceed the number previously configured.
+ */
+void
+set_fwd_ports_number(uint16_t nb_pt)
+{
+	if (nb_pt > nb_cfg_ports) {
+		printf("nb fwd ports %u > %u (number of configured "
+		       "ports) - ignored\n",
+		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
+		return;
+	}
+	nb_fwd_ports = (portid_t) nb_pt;
+	printf("Number of forwarding ports set to %u\n",
+	       (unsigned int) nb_fwd_ports);
+}
+
+/*
+ * Set the number of packets per RX/TX burst, capped at MAX_PKT_BURST.
+ */
+void
+set_nb_pkt_per_burst(uint16_t nb)
+{
+	if (nb > MAX_PKT_BURST) {
+		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
+		       " ignored\n",
+		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
+		return;
+	}
+	nb_pkt_per_burst = nb;
+	printf("Number of packets per burst set to %u\n",
+	       (unsigned int) nb_pkt_per_burst);
+}
+
+/*
+ * Map a tx_pkt_split enum value to its display name via the
+ * tx_split_name[] table; NULL when the value is unknown.
+ */
+static const char *
+tx_split_get_name(enum tx_pkt_split split)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < RTE_DIM(tx_split_name); idx++)
+		if (tx_split_name[idx].split == split)
+			return tx_split_name[idx].name;
+
+	return NULL;
+}
+
+/*
+ * Set the TX packet split mode from its display name; prints an
+ * error and leaves the mode unchanged when the name is unknown.
+ */
+void
+set_tx_pkt_split(const char *name)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < RTE_DIM(tx_split_name); idx++) {
+		if (strcmp(name, tx_split_name[idx].name) == 0) {
+			tx_pkt_split = tx_split_name[idx].split;
+			return;
+		}
+	}
+	printf("unknown value: \"%s\"\n", name);
+}
+
+/*
+ * Display the current TX segment layout: segment count, each segment
+ * length, and the split mode name.
+ */
+void
+show_tx_pkt_segments(void)
+{
+	uint32_t i, n;
+	const char *split;
+
+	n = tx_pkt_nb_segs;
+	split = tx_split_get_name(tx_pkt_split);
+
+	printf("Number of segments: %u\n", n);
+	printf("Segment sizes: ");
+	/* NOTE(review): assumes tx_pkt_nb_segs >= 1; with n == 0 the
+	 * "n - 1" bound would wrap around — confirm the setters enforce
+	 * a minimum of one segment. */
+	for (i = 0; i != n - 1; i++)
+		printf("%hu,", tx_pkt_seg_lengths[i]);
+	printf("%hu\n", tx_pkt_seg_lengths[i]);
+	printf("Split packet: %s\n", split);
+}
+
+/*
+ * Configure the TX packet segment lengths. Rejects the request when
+ * there are more segments than TX descriptors, when a segment exceeds
+ * the mbuf data size, or when the total is too short to hold an empty
+ * UDP/IP packet. On success updates tx_pkt_seg_lengths, tx_pkt_length
+ * and tx_pkt_nb_segs.
+ */
+void
+set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
+{
+	uint16_t tx_pkt_len;
+	unsigned i;
+
+	if (nb_segs >= (unsigned) nb_txd) {
+		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
+		       nb_segs, (unsigned int) nb_txd);
+		return;
+	}
+
+	/*
+	 * Check that each segment length is greater or equal than
+	 * the mbuf data size.
+	 * Check also that the total packet length is greater or equal than the
+	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
+	 */
+	tx_pkt_len = 0;
+	for (i = 0; i < nb_segs; i++) {
+		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
+			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
+			       i, seg_lengths[i], (unsigned) mbuf_data_size);
+			return;
+		}
+		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
+	}
+	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
+		printf("total packet length=%u < %d - give up\n",
+				(unsigned) tx_pkt_len,
+				(int)(sizeof(struct ether_hdr) + 20 + 8));
+		return;
+	}
+
+	/* Commit only after all checks passed. */
+	for (i = 0; i < nb_segs; i++)
+		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
+
+	tx_pkt_length  = tx_pkt_len;
+	tx_pkt_nb_segs = (uint8_t) nb_segs;
+}
+
+/*
+ * Build (once, lazily) and return a '|'-separated list of all
+ * registered forwarding engine names, cached in a static buffer.
+ *
+ * NOTE(review): the strcat() calls are unbounded — assumes the joined
+ * engine names always fit in 128 bytes; confirm against the
+ * fwd_engines[] table.
+ */
+char*
+list_pkt_forwarding_modes(void)
+{
+	static char fwd_modes[128] = "";
+	const char *separator = "|";
+	struct fwd_engine *fwd_eng;
+	unsigned i = 0;
+
+	if (strlen (fwd_modes) == 0) {
+		while ((fwd_eng = fwd_engines[i++]) != NULL) {
+			strcat(fwd_modes, fwd_eng->fwd_mode_name);
+			strcat(fwd_modes, separator);
+		}
+		/* Drop the trailing separator. */
+		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
+	}
+
+	return fwd_modes;
+}
+
+/*
+ * Select the current forwarding engine by name; prints an error and
+ * keeps the current engine when the name matches no registered one.
+ */
+void
+set_pkt_forwarding_mode(const char *fwd_mode_name)
+{
+	unsigned int idx;
+
+	for (idx = 0; fwd_engines[idx] != NULL; idx++) {
+		if (strcmp(fwd_engines[idx]->fwd_mode_name,
+			   fwd_mode_name) == 0) {
+			printf("Set %s packet forwarding mode\n",
+			       fwd_mode_name);
+			cur_fwd_eng = fwd_engines[idx];
+			return;
+		}
+	}
+	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
+}
+
+/*
+ * Set the global verbosity level, reporting the old and new values.
+ */
+void
+set_verbose_level(uint16_t vb_level)
+{
+	printf("Change verbose level from %u to %u\n",
+	       (unsigned int) verbose_level, (unsigned int) vb_level);
+	verbose_level = vb_level;
+}
+
+/*
+ * Enable or disable extended (QinQ) VLAN offload on a port via a
+ * read-modify-write of its VLAN offload bitmap.
+ */
+void
+vlan_extend_set(portid_t port_id, int on)
+{
+	int offload_mask;
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	offload_mask = rte_eth_dev_get_vlan_offload(port_id);
+	if (on)
+		offload_mask |= ETH_VLAN_EXTEND_OFFLOAD;
+	else
+		offload_mask &= ~ETH_VLAN_EXTEND_OFFLOAD;
+
+	ret = rte_eth_dev_set_vlan_offload(port_id, offload_mask);
+	if (ret < 0)
+		printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
+		       "diag=%d\n", port_id, on, ret);
+}
+
+/*
+ * Enable or disable RX VLAN header stripping on a port via a
+ * read-modify-write of its VLAN offload bitmap.
+ */
+void
+rx_vlan_strip_set(portid_t port_id, int on)
+{
+	int offload_mask;
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	offload_mask = rte_eth_dev_get_vlan_offload(port_id);
+	if (on)
+		offload_mask |= ETH_VLAN_STRIP_OFFLOAD;
+	else
+		offload_mask &= ~ETH_VLAN_STRIP_OFFLOAD;
+
+	ret = rte_eth_dev_set_vlan_offload(port_id, offload_mask);
+	if (ret < 0)
+		printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
+		       "diag=%d\n", port_id, on, ret);
+}
+
+/*
+ * Enable or disable RX VLAN stripping on one queue of a port;
+ * reports the driver's diagnostic code on failure.
+ */
+void
+rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
+{
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ret = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
+	if (ret < 0)
+		printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
+		       "diag=%d\n", port_id, queue_id, on, ret);
+}
+
+/*
+ * Enable or disable RX VLAN filtering on a port via a
+ * read-modify-write of its VLAN offload bitmap.
+ */
+void
+rx_vlan_filter_set(portid_t port_id, int on)
+{
+	int offload_mask;
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	offload_mask = rte_eth_dev_get_vlan_offload(port_id);
+	if (on)
+		offload_mask |= ETH_VLAN_FILTER_OFFLOAD;
+	else
+		offload_mask &= ~ETH_VLAN_FILTER_OFFLOAD;
+
+	ret = rte_eth_dev_set_vlan_offload(port_id, offload_mask);
+	if (ret < 0)
+		printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
+		       "diag=%d\n", port_id, on, ret);
+}
+
+/*
+ * Add or remove one VLAN id in the RX VLAN filter table of a port.
+ * Returns 0 on success, 1 on an invalid port or VLAN id, -1 when the
+ * driver call fails.
+ */
+int
+rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
+{
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return 1;
+	if (vlan_id_is_invalid(vlan_id))
+		return 1;
+
+	ret = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
+	if (ret != 0) {
+		printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
+		       "diag=%d\n",
+		       port_id, vlan_id, on, ret);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Add or remove every VLAN id (0..4095) in the RX filter table of a
+ * port, stopping at the first failure reported by rx_vft_set().
+ */
+void
+rx_vlan_all_filter_set(portid_t port_id, int on)
+{
+	uint16_t vid;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+	for (vid = 0; vid < 4096; vid++)
+		if (rx_vft_set(port_id, vid, on) != 0)
+			break;
+}
+
+/*
+ * Set the Tag Protocol Identifier (TPID) used for a given VLAN type
+ * on a port; reports the driver's diagnostic code on failure.
+ */
+void
+vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
+{
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ret = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
+	if (ret != 0)
+		printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
+		       "diag=%d\n",
+		       port_id, vlan_type, tp_id, ret);
+}
+
+/*
+ * Enable single VLAN tag insertion on TX for a port. Rejected when
+ * QinQ (extend) offload is enabled, since single-tag insertion and
+ * QinQ insertion are mutually exclusive.
+ */
+void
+tx_vlan_set(portid_t port_id, uint16_t vlan_id)
+{
+	int vlan_offload;
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+	if (vlan_id_is_invalid(vlan_id))
+		return;
+
+	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
+	if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
+		printf("Error, as QinQ has been enabled.\n");
+		return;
+	}
+
+	/* Clear any previous VLAN/QinQ insertion state before setting. */
+	tx_vlan_reset(port_id);
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
+	ports[port_id].tx_vlan_id = vlan_id;
+}
+
+/*
+ * Enable double VLAN (QinQ) tag insertion on TX for a port. Requires
+ * the QinQ (extend) offload to be already enabled on the port.
+ */
+void
+tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
+{
+	int vlan_offload;
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+	if (vlan_id_is_invalid(vlan_id))
+		return;
+	if (vlan_id_is_invalid(vlan_id_outer))
+		return;
+
+	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
+	if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
+		printf("Error, as QinQ hasn't been enabled.\n");
+		return;
+	}
+
+	/* Clear any previous VLAN/QinQ insertion state before setting. */
+	tx_vlan_reset(port_id);
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
+	ports[port_id].tx_vlan_id = vlan_id;
+	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
+}
+
+/*
+ * Disable both single VLAN and QinQ tag insertion on TX for a port
+ * and clear the recorded inner/outer VLAN ids.
+ */
+void
+tx_vlan_reset(portid_t port_id)
+{
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+	ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
+				TESTPMD_TX_OFFLOAD_INSERT_QINQ);
+	ports[port_id].tx_vlan_id = 0;
+	ports[port_id].tx_vlan_id_outer = 0;
+}
+
+/*
+ * Enable or disable port VLAN id (PVID) insertion on TX for a port.
+ *
+ * Fix: the return code of rte_eth_dev_set_vlan_pvid() was previously
+ * ignored, silently hiding driver failures; report it like every
+ * other rte_eth_dev_* wrapper in this file.
+ */
+void
+tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
+{
+	int diag;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	diag = rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
+	if (diag < 0)
+		printf("rte_eth_dev_set_vlan_pvid(port_pi=%d, vlan_id=%d, "
+		       "on=%d) failed diag=%d\n",
+		       port_id, vlan_id, on, diag);
+}
+
+/*
+ * Map an RX or TX queue of a port to a statistics counter id.
+ * Updates an existing mapping for (port, queue) when one is found,
+ * otherwise appends a new entry to the corresponding mapping table.
+ *
+ * NOTE(review): the append path does not check the mapping arrays'
+ * capacity here — presumably bounded elsewhere (parameters parsing);
+ * confirm against the table definitions.
+ */
+void
+set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
+{
+	uint16_t i;
+	uint8_t existing_mapping_found = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
+		return;
+
+	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+		printf("map_value not in required range 0..%d\n",
+				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+		return;
+	}
+
+	if (!is_rx) { /*then tx*/
+		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
+			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
+			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
+				tx_queue_stats_mappings[i].stats_counter_id = map_value;
+				existing_mapping_found = 1;
+				break;
+			}
+		}
+		if (!existing_mapping_found) { /* A new additional mapping... */
+			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
+			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
+			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
+			nb_tx_queue_stats_mappings++;
+		}
+	}
+	else { /*rx*/
+		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
+			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
+			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
+				rx_queue_stats_mappings[i].stats_counter_id = map_value;
+				existing_mapping_found = 1;
+				break;
+			}
+		}
+		if (!existing_mapping_found) { /* A new additional mapping... */
+			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
+			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
+			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
+			nb_rx_queue_stats_mappings++;
+		}
+	}
+}
+
+/*
+ * Print an FDIR input mask. The fields shown depend on the global
+ * FDIR mode: MAC/VLAN and tunnel modes show MAC (and tunnel) masks,
+ * the flow modes show IPv4/IPv6 address and L4 port masks.
+ */
+static inline void
+print_fdir_mask(struct rte_eth_fdir_masks *mask)
+{
+	printf("\n    vlan_tci: 0x%04x, ", mask->vlan_tci_mask);
+
+	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+		printf("mac_addr: 0x%02x", mask->mac_addr_byte_mask);
+	else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		printf("mac_addr: 0x%02x, tunnel_type: 0x%01x, tunnel_id: 0x%08x",
+			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
+			mask->tunnel_id_mask);
+	else {
+		printf("src_ipv4: 0x%08x, dst_ipv4: 0x%08x,"
+			" src_port: 0x%04x, dst_port: 0x%04x",
+			mask->ipv4_mask.src_ip, mask->ipv4_mask.dst_ip,
+			mask->src_port_mask, mask->dst_port_mask);
+
+		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x,"
+			" dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
+			mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1],
+			mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3],
+			mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1],
+			mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]);
+	}
+
+	printf("\n");
+}
+
+/*
+ * Print the configured flexible payload selections: one line per
+ * payload config, labelled by its type, followed by the first `num`
+ * source offsets.
+ */
+static inline void
+print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
+{
+	struct rte_eth_flex_payload_cfg *pcfg;
+	uint32_t idx, off;
+
+	for (idx = 0; idx < flex_conf->nb_payloads; idx++) {
+		pcfg = &flex_conf->flex_set[idx];
+		switch (pcfg->type) {
+		case RTE_ETH_RAW_PAYLOAD:
+			printf("\n    RAW:  ");
+			break;
+		case RTE_ETH_L2_PAYLOAD:
+			printf("\n    L2_PAYLOAD:  ");
+			break;
+		case RTE_ETH_L3_PAYLOAD:
+			printf("\n    L3_PAYLOAD:  ");
+			break;
+		case RTE_ETH_L4_PAYLOAD:
+			printf("\n    L4_PAYLOAD:  ");
+			break;
+		default:
+			printf("\n    UNKNOWN PAYLOAD(%u):  ", pcfg->type);
+			break;
+		}
+		for (off = 0; off < num; off++)
+			printf("  %-5u", pcfg->src_offset[off]);
+	}
+	printf("\n");
+}
+
+/*
+ * Map an RTE_ETH_FLOW_* value to a human-readable name.
+ * Returns a pointer into a static table (do not free/modify), or
+ * NULL when the flow type is not in the table.
+ */
+static char *
+flowtype_to_str(uint16_t flow_type)
+{
+	struct flow_type_info {
+		char str[32];
+		uint16_t ftype;
+	};
+
+	uint8_t i;
+	static struct flow_type_info flowtype_str_table[] = {
+		{"raw", RTE_ETH_FLOW_RAW},
+		{"ipv4", RTE_ETH_FLOW_IPV4},
+		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
+		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
+		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
+		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
+		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
+		{"ipv6", RTE_ETH_FLOW_IPV6},
+		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
+		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
+		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
+		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
+		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
+		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+	};
+
+	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
+		if (flowtype_str_table[i].ftype == flow_type)
+			return flowtype_str_table[i].str;
+	}
+
+	return NULL;
+}
+
+/*
+ * Print the configured flexible payload masks: one line per flow
+ * type, followed by the first `num` mask bytes in hex.
+ */
+static inline void
+print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
+{
+	struct rte_eth_fdir_flex_mask *mask;
+	uint32_t i, j;
+	char *p;
+
+	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
+		mask = &flex_conf->flex_mask[i];
+		p = flowtype_to_str(mask->flow_type);
+		printf("\n    %s:\t", p ? p : "unknown");
+		for (j = 0; j < num; j++)
+			printf(" %02x", mask->mask[j]);
+	}
+	printf("\n");
+}
+
+/*
+ * Print the names of all flow types set in the given bitmask; bits
+ * with no known name print as "unknown".
+ */
+static inline void
+print_fdir_flow_type(uint32_t flow_types_mask)
+{
+	int ftype;
+	char *name;
+
+	for (ftype = RTE_ETH_FLOW_UNKNOWN; ftype < RTE_ETH_FLOW_MAX; ftype++) {
+		if (!(flow_types_mask & (1 << ftype)))
+			continue;
+		name = flowtype_to_str(ftype);
+		if (name != NULL)
+			printf(" %s", name);
+		else
+			printf(" unknown");
+	}
+	printf("\n");
+}
+
+/*
+ * Query and display the flow director (FDIR) capabilities, masks,
+ * flex payload configuration and statistics of a port.
+ *
+ * NOTE(review): the two rte_eth_dev_filter_ctrl() return values are
+ * ignored — on failure the zero-initialized structures are printed;
+ * presumably acceptable for a display-only path, but confirm.
+ */
+void
+fdir_get_infos(portid_t port_id)
+{
+	struct rte_eth_fdir_stats fdir_stat;
+	struct rte_eth_fdir_info fdir_info;
+	int ret;
+
+	static const char *fdir_stats_border = "########################";
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
+	if (ret < 0) {
+		printf("\n FDIR is not supported on port %-2d\n",
+			port_id);
+		return;
+	}
+
+	memset(&fdir_info, 0, sizeof(fdir_info));
+	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+			       RTE_ETH_FILTER_INFO, &fdir_info);
+	memset(&fdir_stat, 0, sizeof(fdir_stat));
+	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+			       RTE_ETH_FILTER_STATS, &fdir_stat);
+	printf("\n  %s FDIR infos for port %-2d     %s\n",
+	       fdir_stats_border, port_id, fdir_stats_border);
+	printf("  MODE: ");
+	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
+		printf("  PERFECT\n");
+	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+		printf("  PERFECT-MAC-VLAN\n");
+	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		printf("  PERFECT-TUNNEL\n");
+	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
+		printf("  SIGNATURE\n");
+	else
+		printf("  DISABLE\n");
+	/* MAC-VLAN and tunnel modes have no flow-type granularity. */
+	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
+		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
+		printf("  SUPPORTED FLOW TYPE: ");
+		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
+	}
+	printf("  FLEX PAYLOAD INFO:\n");
+	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
+	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
+	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
+		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
+		fdir_info.flex_payload_unit,
+		fdir_info.max_flex_payload_segment_num,
+		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
+	printf("  MASK: ");
+	print_fdir_mask(&fdir_info.mask);
+	if (fdir_info.flex_conf.nb_payloads > 0) {
+		printf("  FLEX PAYLOAD SRC OFFSET:");
+		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
+	}
+	if (fdir_info.flex_conf.nb_flexmasks > 0) {
+		printf("  FLEX MASK CFG:");
+		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
+	}
+	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
+	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
+	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
+	       fdir_info.guarant_spc, fdir_info.best_spc);
+	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
+	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
+	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
+	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
+	       fdir_stat.collision, fdir_stat.free,
+	       fdir_stat.maxhash, fdir_stat.maxlen,
+	       fdir_stat.add, fdir_stat.remove,
+	       fdir_stat.f_add, fdir_stat.f_remove);
+	printf("  %s############################%s\n",
+	       fdir_stats_border, fdir_stats_border);
+}
+
+/*
+ * Record a flex mask for a flow type in the port's FDIR configuration:
+ * reuse the existing entry for that flow type when present, otherwise
+ * append a new one (bounded by the flex_mask table size).
+ *
+ * Fix: validate port_id before indexing ports[], consistent with
+ * every other public configuration entry point in this file.
+ */
+void
+fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
+{
+	struct rte_port *port;
+	struct rte_eth_fdir_flex_conf *flex_conf;
+	int i, idx = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	port = &ports[port_id];
+	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
+	/* Look for an existing entry for this flow type. */
+	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
+		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
+			idx = i;
+			break;
+		}
+	}
+	if (i >= RTE_ETH_FLOW_MAX) {
+		/* No entry yet: append if the table has room. */
+		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
+			idx = flex_conf->nb_flexmasks;
+			flex_conf->nb_flexmasks++;
+		} else {
+			printf("The flex mask table is full. Can not set flex"
+				" mask for flow_type(%u).", cfg->flow_type);
+			return;
+		}
+	}
+	(void)rte_memcpy(&flex_conf->flex_mask[idx],
+			 cfg,
+			 sizeof(struct rte_eth_fdir_flex_mask));
+}
+
+/*
+ * Record a flex payload selection for a payload type in the port's
+ * FDIR configuration: reuse the existing entry for that type when
+ * present, otherwise append a new one (bounded by the table size).
+ *
+ * Fix: validate port_id before indexing ports[], consistent with
+ * every other public configuration entry point in this file.
+ */
+void
+fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
+{
+	struct rte_port *port;
+	struct rte_eth_fdir_flex_conf *flex_conf;
+	int i, idx = 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	port = &ports[port_id];
+	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
+	/* Look for an existing entry for this payload type. */
+	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
+		if (cfg->type == flex_conf->flex_set[i].type) {
+			idx = i;
+			break;
+		}
+	}
+	if (i >= RTE_ETH_PAYLOAD_MAX) {
+		/* No entry yet: append if the table has room. */
+		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
+			idx = flex_conf->nb_payloads;
+			flex_conf->nb_payloads++;
+		} else {
+			printf("The flex payload table is full. Can not set"
+				" flex payload for type(%u).", cfg->type);
+			return;
+		}
+	}
+	(void)rte_memcpy(&flex_conf->flex_set[idx],
+			 cfg,
+			 sizeof(struct rte_eth_flex_payload_cfg));
+}
+
+/*
+ * Enable or disable RX (is_rx != 0) or TX traffic for a virtual
+ * function of a port; reports the driver diagnostic on failure.
+ */
+void
+set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
+{
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	if (is_rx)
+		ret = rte_eth_dev_set_vf_rx(port_id, vf, on);
+	else
+		ret = rte_eth_dev_set_vf_tx(port_id, vf, on);
+
+	if (ret == 0)
+		return;
+
+	if (is_rx)
+		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
+		       "diag=%d\n", port_id, ret);
+	else
+		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
+		       "diag=%d\n", port_id, ret);
+}
+
+/*
+ * Add or remove a VLAN id in the RX filter of the VFs selected by
+ * vf_mask on a port; reports the driver diagnostic on failure.
+ */
+void
+set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
+{
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+	if (vlan_id_is_invalid(vlan_id))
+		return;
+
+	ret = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
+	if (ret != 0)
+		printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
+		       "diag=%d\n", port_id, ret);
+}
+
+/*
+ * Set a TX rate limit on one queue of a port. The rate must not
+ * exceed the current link speed. Returns 0 on success, 1 on an
+ * invalid port or rate, the driver diagnostic otherwise.
+ */
+int
+set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
+{
+	int diag;
+	struct rte_eth_link link;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return 1;
+	rte_eth_link_get_nowait(port_id, &link);
+	if (rate > link.link_speed) {
+		printf("Invalid rate value:%u bigger than link speed: %u\n",
+			rate, link.link_speed);
+		return 1;
+	}
+	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
+	if (diag == 0)
+		return diag;
+	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
+		port_id, diag);
+	return diag;
+}
+
+/*
+ * Set a TX rate limit for a VF on the queues selected by q_msk.
+ * A zero queue mask is a no-op. The rate must not exceed the current
+ * link speed. Returns 0 on success, 1 on an invalid port or rate,
+ * the driver diagnostic otherwise.
+ */
+int
+set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
+{
+	int diag;
+	struct rte_eth_link link;
+
+	if (q_msk == 0)
+		return 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return 1;
+	rte_eth_link_get_nowait(port_id, &link);
+	if (rate > link.link_speed) {
+		printf("Invalid rate value:%u bigger than link speed: %u\n",
+			rate, link.link_speed);
+		return 1;
+	}
+	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
+	if (diag == 0)
+		return diag;
+	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
+		port_id, diag);
+	return diag;
+}
+
+/*
+ * Functions to manage the set of filtered Multicast MAC addresses.
+ *
+ * A pool of filtered multicast MAC addresses is associated with each port.
+ * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
+ * The address of the pool and the number of valid multicast MAC addresses
+ * recorded in the pool are stored in the fields "mc_addr_pool" and
+ * "mc_addr_nb" of the "rte_port" data structure.
+ *
+ * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
+ * to be supplied a contiguous array of multicast MAC addresses.
+ * To comply with this constraint, the set of multicast addresses recorded
+ * into the pool are systematically compacted at the beginning of the pool.
+ * Hence, when a multicast address is removed from the pool, all following
+ * addresses, if any, are copied back to keep the set contiguous.
+ */
+#define MCAST_POOL_INC 32
+
+/*
+ * Grow the port's multicast address pool by one slot, reallocating
+ * the pool in chunks of MCAST_POOL_INC entries when needed.
+ * Returns 0 on success, -ENOMEM on allocation failure (the existing
+ * pool is left intact in that case).
+ */
+static int
+mcast_addr_pool_extend(struct rte_port *port)
+{
+	struct ether_addr *mc_pool;
+	size_t mc_pool_size;
+
+	/*
+	 * If a free entry is available at the end of the pool, just
+	 * increment the number of recorded multicast addresses.
+	 */
+	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
+		port->mc_addr_nb++;
+		return 0;
+	}
+
+	/*
+	 * [re]allocate a pool with MCAST_POOL_INC more entries.
+	 * The previous test guarantees that port->mc_addr_nb is a multiple
+	 * of MCAST_POOL_INC.
+	 */
+	mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
+						    MCAST_POOL_INC);
+	mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
+						mc_pool_size);
+	if (mc_pool == NULL) {
+		printf("allocation of pool of %u multicast addresses failed\n",
+		       port->mc_addr_nb + MCAST_POOL_INC);
+		return -ENOMEM;
+	}
+
+	port->mc_addr_pool = mc_pool;
+	port->mc_addr_nb++;
+	return 0;
+
+}
+
+/*
+ * Remove the address at addr_idx from the port's multicast pool,
+ * shifting the following entries down so the pool stays contiguous.
+ * Frees the pool entirely when its last address is removed.
+ */
+static void
+mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
+{
+	port->mc_addr_nb--;
+	if (addr_idx == port->mc_addr_nb) {
+		/* No need to recompact the set of multicast addresses. */
+		if (port->mc_addr_nb == 0) {
+			/* free the pool of multicast addresses. */
+			free(port->mc_addr_pool);
+			port->mc_addr_pool = NULL;
+		}
+		return;
+	}
+	memmove(&port->mc_addr_pool[addr_idx],
+		&port->mc_addr_pool[addr_idx + 1],
+		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
+}
+
+/*
+ * Push the port's current multicast address pool down to the device
+ * via rte_eth_dev_set_mc_addr_list() and report any failure.
+ *
+ * Fix: the printf arguments were swapped relative to the format
+ * string — mc_addr_nb was printed as "port" and port_id as "nb".
+ */
+static void
+eth_port_multicast_addr_list_set(uint8_t port_id)
+{
+	struct rte_port *port;
+	int diag;
+
+	port = &ports[port_id];
+	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
+					    port->mc_addr_nb);
+	if (diag == 0)
+		return;
+	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
+	       port_id, port->mc_addr_nb, -diag);
+}
+
+/*
+ * Add a multicast MAC address to the filter set of a port. Ignores
+ * duplicates; on success the updated pool is pushed to the device.
+ */
+void
+mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
+{
+	struct rte_port *port;
+	uint32_t i;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	port = &ports[port_id];
+
+	/*
+	 * Check that the added multicast MAC address is not already recorded
+	 * in the pool of multicast addresses.
+	 */
+	for (i = 0; i < port->mc_addr_nb; i++) {
+		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
+			printf("multicast address already filtered by port\n");
+			return;
+		}
+	}
+
+	if (mcast_addr_pool_extend(port) != 0)
+		return;
+	/* i is now the index of the newly extended slot. */
+	ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
+	eth_port_multicast_addr_list_set(port_id);
+}
+
+/*
+ * Remove a multicast MAC address from the filter set of a port.
+ * Prints a message when the address was not filtered; on success the
+ * compacted pool is pushed to the device.
+ */
+void
+mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
+{
+	struct rte_port *port;
+	uint32_t i;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	port = &ports[port_id];
+
+	/*
+	 * Search the pool of multicast MAC addresses for the removed address.
+	 */
+	for (i = 0; i < port->mc_addr_nb; i++) {
+		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
+			break;
+	}
+	if (i == port->mc_addr_nb) {
+		printf("multicast address not filtered by port %d\n", port_id);
+		return;
+	}
+
+	mcast_addr_pool_remove(port, i);
+	eth_port_multicast_addr_list_set(port_id);
+}
+
+/*
+ * Query and display the DCB configuration of a port: number of
+ * traffic classes, priority mapping, bandwidth shares and the
+ * RX/TX queue ranges of each TC.
+ */
+void
+port_dcb_info_display(uint8_t port_id)
+{
+	struct rte_eth_dcb_info dcb_info;
+	uint16_t i;
+	int ret;
+	static const char *border = "================";
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
+	if (ret) {
+		printf("\n Failed to get dcb infos on port %-2d\n",
+			port_id);
+		return;
+	}
+	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
+	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
+	printf("\n  TC :        ");
+	for (i = 0; i < dcb_info.nb_tcs; i++)
+		printf("\t%4d", i);
+	printf("\n  Priority :  ");
+	for (i = 0; i < dcb_info.nb_tcs; i++)
+		printf("\t%4d", dcb_info.prio_tc[i]);
+	printf("\n  BW percent :");
+	for (i = 0; i < dcb_info.nb_tcs; i++)
+		printf("\t%4d%%", dcb_info.tc_bws[i]);
+	/* Only the first VMDq pool's queue mapping (index 0) is shown. */
+	printf("\n  RXQ base :  ");
+	for (i = 0; i < dcb_info.nb_tcs; i++)
+		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
+	printf("\n  RXQ number :");
+	for (i = 0; i < dcb_info.nb_tcs; i++)
+		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
+	printf("\n  TXQ base :  ");
+	for (i = 0; i < dcb_info.nb_tcs; i++)
+		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
+	printf("\n  TXQ number :");
+	for (i = 0; i < dcb_info.nb_tcs; i++)
+		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
+	printf("\n");
+}
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
new file mode 100644
index 00000000..7e4f6620
--- /dev/null
+++ b/app/test-pmd/csumonly.c
@@ -0,0 +1,873 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include "testpmd.h"
+
+#define IP_DEFTTL 64 /* from RFC 1340. */
+#define IP_VERSION 0x40
+#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
+#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
+
+/* GRE "key present" flag bit, compared via _htons() in parse_gre() */
+#define GRE_KEY_PRESENT 0x2000
+/* length in bytes of the optional GRE key field */
+#define GRE_KEY_LEN 4
+/* only the key field is understood; any other GRE flag aborts parsing */
+#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT
+
+/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
+#else
+#define _htons(x) (x)
+#endif
+
+/* structure that caches offload info for the current packet */
+struct testpmd_offload_info {
+ uint16_t ethertype; /* inner-most ethertype, network byte order */
+ uint16_t l2_len; /* inner L2 length (Ether + optional VLAN/tunnel) */
+ uint16_t l3_len; /* inner L3 (IPv4/IPv6) header length */
+ uint16_t l4_len; /* inner L4 header length; only filled for TCP */
+ uint8_t l4_proto; /* inner L4 protocol (IPPROTO_*) */
+ uint8_t is_tunnel; /* non-zero once a tunnel header was recognized */
+ uint16_t outer_ethertype; /* outer ethertype, valid when is_tunnel */
+ uint16_t outer_l2_len; /* outer L2 length, valid when is_tunnel */
+ uint16_t outer_l3_len; /* outer L3 length, valid when is_tunnel */
+ uint8_t outer_l4_proto; /* outer L4 protocol, valid when is_tunnel */
+ uint16_t tso_segsz; /* TSO segment size; 0 means TSO disabled */
+};
+
+/* simplified GRE header: only the mandatory first two 16-bit fields;
+ * optional fields (e.g. key) are skipped by adjusting the length in
+ * parse_gre() */
+struct simple_gre_hdr {
+ uint16_t flags; /* GRE flags and version, network byte order */
+ uint16_t proto; /* encapsulated protocol, network byte order */
+} __attribute__((__packed__));
+
+/*
+ * Return the pseudo-header checksum used to seed a hardware-offloaded
+ * L4 checksum. Any ethertype other than IPv4 is assumed to be IPv6.
+ */
+static uint16_t
+get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
+{
+	if (ethertype != _htons(ETHER_TYPE_IPv4))
+		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
+	return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
+}
+
+/*
+ * Compute a full software UDP/TCP checksum over the L4 header and
+ * payload. Any ethertype other than IPv4 is assumed to be IPv6.
+ */
+static uint16_t
+get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
+{
+	return (ethertype == _htons(ETHER_TYPE_IPv4)) ?
+		rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr) :
+		rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
+}
+
+/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
+static void
+parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
+{
+	const struct tcp_hdr *tcp;
+
+	/* IHL is the low nibble of version_ihl, in 32-bit words. */
+	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+	info->l4_proto = ipv4_hdr->next_proto_id;
+	info->l4_len = 0;
+
+	/* l4_len is only meaningful for TCP (used by TSO). */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp = (const struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
+		info->l4_len = (tcp->data_off & 0xf0) >> 2;
+	}
+}
+
+/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
+static void
+parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
+{
+	const struct tcp_hdr *tcp;
+
+	/* Fixed-size header; extension headers are not parsed here. */
+	info->l3_len = sizeof(struct ipv6_hdr);
+	info->l4_proto = ipv6_hdr->proto;
+	info->l4_len = 0;
+
+	/* l4_len is only meaningful for TCP (used by TSO). */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp = (const struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
+		info->l4_len = (tcp->data_off & 0xf0) >> 2;
+	}
+}
+
+/*
+ * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
+ * l4_proto fields. Recognizes IPv4/IPv6 with at most one VLAN header;
+ * l4_len is only set for TCP (useful for TSO).
+ */
+static void
+parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
+{
+	char *l3;
+
+	info->l2_len = sizeof(struct ether_hdr);
+	info->ethertype = eth_hdr->ether_type;
+
+	/* Skip one optional VLAN tag, if present. */
+	if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
+		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+
+		info->l2_len += sizeof(struct vlan_hdr);
+		info->ethertype = vlan_hdr->eth_proto;
+	}
+
+	l3 = (char *)eth_hdr + info->l2_len;
+	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
+		parse_ipv4((struct ipv4_hdr *)l3, info);
+	} else if (info->ethertype == _htons(ETHER_TYPE_IPv6)) {
+		parse_ipv6((struct ipv6_hdr *)l3, info);
+	} else {
+		/* Unknown L3 protocol: report no L3/L4 information. */
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+	}
+}
+
+/* Parse a vxlan header */
+static void
+parse_vxlan(struct udp_hdr *udp_hdr,
+	    struct testpmd_offload_info *info,
+	    uint32_t pkt_type)
+{
+	struct ether_hdr *inner_eth;
+
+	/*
+	 * Only treat the packet as VxLAN when the UDP destination port is
+	 * the IANA default 4789 (RFC 7348), or when the driver already
+	 * flagged the packet as a tunnel (i40e only currently).
+	 */
+	if (udp_hdr->dst_port != _htons(4789) &&
+	    RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
+		return;
+
+	/* The headers parsed so far become the outer headers. */
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+	info->outer_l4_proto = info->l4_proto;
+
+	/* Inner frame starts right after the UDP + VxLAN headers. */
+	inner_eth = (struct ether_hdr *)((char *)udp_hdr +
+		sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr));
+
+	parse_ethernet(inner_eth, info);
+	info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
+}
+
+/*
+ * Parse a gre header and the encapsulated IPv4, IPv6 or Ethernet
+ * payload (ETHER_TYPE_TEB). On success the previously parsed headers
+ * become the outer headers and the inner ones are (re)parsed; packets
+ * with unsupported GRE flags or protocols are left untouched.
+ *
+ * The "save outer headers" sequence was previously duplicated in each
+ * protocol branch; it is now hoisted behind a single protocol check.
+ */
+static void
+parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
+{
+	uint8_t gre_len = 0;
+
+	/* check which fields are supported (only the key bit may be set) */
+	if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
+		return;
+
+	gre_len += sizeof(struct simple_gre_hdr);
+	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
+		gre_len += GRE_KEY_LEN;
+
+	/* unsupported encapsulated protocol: do not touch the packet */
+	if (gre_hdr->proto != _htons(ETHER_TYPE_IPv4) &&
+	    gre_hdr->proto != _htons(ETHER_TYPE_IPv6) &&
+	    gre_hdr->proto != _htons(ETHER_TYPE_TEB))
+		return;
+
+	/* the headers parsed so far become the outer headers */
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+	info->outer_l4_proto = info->l4_proto;
+
+	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
+		parse_ipv4((struct ipv4_hdr *)((char *)gre_hdr + gre_len),
+			info);
+		info->ethertype = _htons(ETHER_TYPE_IPv4);
+		info->l2_len = 0; /* no inner L2 header for IP-over-GRE */
+	} else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
+		info->ethertype = _htons(ETHER_TYPE_IPv6);
+		parse_ipv6((struct ipv6_hdr *)((char *)gre_hdr + gre_len),
+			info);
+		info->l2_len = 0; /* no inner L2 header for IP-over-GRE */
+	} else {
+		/* transparent ethernet bridging: a full inner frame */
+		parse_ethernet((struct ether_hdr *)((char *)gre_hdr + gre_len),
+			info);
+	}
+
+	/* account for the GRE header itself in the inner L2 length */
+	info->l2_len += gre_len;
+}
+
+
+/* Parse an encapsulated ip or ipv6 header */
+static void
+parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
+{
+	struct ipv4_hdr *ip4 = encap_ip;
+	uint8_t ip_version = (ip4->version_ihl & 0xf0) >> 4;
+
+	/* Only IPv4-in-IP and IPv6-in-IP are recognized. */
+	if (ip_version != 4 && ip_version != 6)
+		return;
+
+	/* The headers parsed so far become the outer headers. */
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+
+	if (ip_version == 4) {
+		parse_ipv4(ip4, info);
+		info->ethertype = _htons(ETHER_TYPE_IPv4);
+	} else {
+		parse_ipv6((struct ipv6_hdr *)encap_ip, info);
+		info->ethertype = _htons(ETHER_TYPE_IPv6);
+	}
+	/* IP-in-IP tunnels carry no inner L2 header. */
+	info->l2_len = 0;
+}
+
+/* modify the IPv4 or IPv6 source address of a packet */
+static void
+change_ip_addresses(void *l3_hdr, uint16_t ethertype)
+{
+	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
+		struct ipv4_hdr *ip4 = l3_hdr;
+
+		/* increment in host byte order so the carry propagates */
+		ip4->src_addr =
+			rte_cpu_to_be_32(rte_be_to_cpu_32(ip4->src_addr) + 1);
+	} else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
+		struct ipv6_hdr *ip6 = l3_hdr;
+
+		/* bump only the least-significant address byte (may wrap) */
+		ip6->src_addr[15] += 1;
+	}
+}
+
+/* if possible, calculate the checksum of a packet in hw or sw,
+ * depending on the testpmd command line configuration.
+ * Returns the PKT_TX_* mbuf offload flags to set; the header checksum
+ * fields are zeroed (hw path), set to the pseudo-header sum (hw L4
+ * path) or fully recomputed (sw path) as a side effect. */
+static uint64_t
+process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
+ uint16_t testpmd_ol_flags)
+{
+ struct ipv4_hdr *ipv4_hdr = l3_hdr;
+ struct udp_hdr *udp_hdr;
+ struct tcp_hdr *tcp_hdr;
+ struct sctp_hdr *sctp_hdr;
+ uint64_t ol_flags = 0;
+
+ if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
+ ipv4_hdr = l3_hdr;
+ ipv4_hdr->hdr_checksum = 0;
+
+ ol_flags |= PKT_TX_IPV4;
+ /* with TSO on TCP, always let the hardware redo the IP cksum */
+ if (info->tso_segsz != 0 && info->l4_proto == IPPROTO_TCP) {
+ ol_flags |= PKT_TX_IP_CKSUM;
+ } else {
+ if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
+ ol_flags |= PKT_TX_IP_CKSUM;
+ else
+ ipv4_hdr->hdr_checksum =
+ rte_ipv4_cksum(ipv4_hdr);
+ }
+ } else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
+ ol_flags |= PKT_TX_IPV6;
+ else
+ return 0; /* packet type not supported, nothing to do */
+
+ if (info->l4_proto == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
+ /* do not recalculate udp cksum if it was 0 */
+ if (udp_hdr->dgram_cksum != 0) {
+ udp_hdr->dgram_cksum = 0;
+ /* hw offload needs the pseudo-header sum as a seed */
+ if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
+ ol_flags |= PKT_TX_UDP_CKSUM;
+ udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
+ info->ethertype, ol_flags);
+ } else {
+ udp_hdr->dgram_cksum =
+ get_udptcp_checksum(l3_hdr, udp_hdr,
+ info->ethertype);
+ }
+ }
+ } else if (info->l4_proto == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
+ tcp_hdr->cksum = 0;
+ /* TSO implies hardware TCP checksum */
+ if (info->tso_segsz != 0) {
+ ol_flags |= PKT_TX_TCP_SEG;
+ tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
+ ol_flags);
+ } else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
+ ol_flags |= PKT_TX_TCP_CKSUM;
+ tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
+ ol_flags);
+ } else {
+ tcp_hdr->cksum =
+ get_udptcp_checksum(l3_hdr, tcp_hdr,
+ info->ethertype);
+ }
+ } else if (info->l4_proto == IPPROTO_SCTP) {
+ sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
+ sctp_hdr->cksum = 0;
+ /* sctp payload must be a multiple of 4 to be
+ * offloaded */
+ /* NOTE(review): for IPv6 packets ipv4_hdr still aliases the
+ * IPv6 header here, so total_length reads the wrong field --
+ * confirm whether SCTP-over-IPv6 offload is intended. */
+ if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
+ ((ipv4_hdr->total_length & 0x3) == 0)) {
+ ol_flags |= PKT_TX_SCTP_CKSUM;
+ } else {
+ /* XXX implement CRC32c, example available in
+ * RFC3309 */
+ }
+ }
+
+ return ol_flags;
+}
+
+/* Calculate the checksum of outer header (only vxlan is supported,
+ * meaning IP + UDP). The caller already checked that it's a vxlan
+ * packet. Returns the PKT_TX_OUTER_* flags to set on the mbuf. */
+static uint64_t
+process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
+ uint16_t testpmd_ol_flags)
+{
+ struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
+ struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
+ struct udp_hdr *udp_hdr;
+ uint64_t ol_flags = 0;
+
+ if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
+ ipv4_hdr->hdr_checksum = 0;
+ ol_flags |= PKT_TX_OUTER_IPV4;
+
+ if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
+ ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+ else
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ } else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
+ /* NOTE(review): PKT_TX_OUTER_IPV6 is only set when the
+ * OUTER_IP_CKSUM offload flag is enabled -- confirm this
+ * gating is intended for IPv6 outer headers. */
+ ol_flags |= PKT_TX_OUTER_IPV6;
+
+ if (info->outer_l4_proto != IPPROTO_UDP)
+ return ol_flags;
+
+ /* outer UDP checksum is always done in software as we have no
+ * hardware supporting it today, and no API for it. */
+
+ udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
+ /* do not recalculate udp cksum if it was 0 */
+ if (udp_hdr->dgram_cksum != 0) {
+ udp_hdr->dgram_cksum = 0;
+ if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
+ udp_hdr->dgram_cksum =
+ rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
+ else
+ udp_hdr->dgram_cksum =
+ rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
+ }
+
+ return ol_flags;
+}
+
+/*
+ * Helper function.
+ * Performs actual copying.
+ * Copies the data of source mbuf chain 'ms' into the pre-sized
+ * destination segments md[0..nb_seg-1] (seglen[i] bytes each),
+ * then links the destination segments and copies the metadata.
+ * Returns number of segments in the destination mbuf on success,
+ * or negative error code on failure.
+ */
+static int
+mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
+ uint16_t seglen[], uint8_t nb_seg)
+{
+ uint32_t dlen, slen, tlen;
+ uint32_t i, len;
+ const struct rte_mbuf *m;
+ const uint8_t *src;
+ uint8_t *dst;
+
+ /* dlen/slen: bytes left in current dst/src segment;
+ * tlen: total bytes copied so far */
+ dlen = 0;
+ slen = 0;
+ tlen = 0;
+
+ dst = NULL;
+ src = NULL;
+
+ m = ms;
+ i = 0;
+ /* dual-cursor walk: advance src chain and dst array independently,
+ * refilling whichever cursor is exhausted */
+ while (ms != NULL && i != nb_seg) {
+
+ if (slen == 0) {
+ slen = rte_pktmbuf_data_len(ms);
+ src = rte_pktmbuf_mtod(ms, const uint8_t *);
+ }
+
+ if (dlen == 0) {
+ dlen = RTE_MIN(seglen[i], slen);
+ md[i]->data_len = dlen;
+ md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
+ dst = rte_pktmbuf_mtod(md[i], uint8_t *);
+ }
+
+ /* copy the largest run both cursors allow */
+ len = RTE_MIN(slen, dlen);
+ memcpy(dst, src, len);
+ tlen += len;
+ slen -= len;
+ dlen -= len;
+ src += len;
+ dst += len;
+
+ if (slen == 0)
+ ms = ms->next;
+ if (dlen == 0)
+ i++;
+ }
+
+ /* source not fully consumed: destination was too small */
+ if (ms != NULL)
+ return -ENOBUFS;
+ else if (tlen != m->pkt_len)
+ return -EINVAL;
+
+ /* first segment carries the chain-wide metadata */
+ md[0]->nb_segs = nb_seg;
+ md[0]->pkt_len = tlen;
+ md[0]->vlan_tci = m->vlan_tci;
+ md[0]->vlan_tci_outer = m->vlan_tci_outer;
+ md[0]->ol_flags = m->ol_flags;
+ md[0]->tx_offload = m->tx_offload;
+
+ return nb_seg;
+}
+
+/*
+ * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
+ * Copy packet contents and offload information into the new segmented mbuf.
+ * Returns the head of the new chain, or NULL on allocation/copy failure
+ * (the caller then keeps forwarding the original mbuf).
+ */
+static struct rte_mbuf *
+pkt_copy_split(const struct rte_mbuf *pkt)
+{
+ int32_t n, rc;
+ uint32_t i, len, nb_seg;
+ struct rte_mempool *mp;
+ uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
+ struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];
+
+ mp = current_fwd_lcore()->mbp;
+
+ /* in "rnd" mode, pick a random segment count per packet */
+ if (tx_pkt_split == TX_PKT_SPLIT_RND)
+ nb_seg = random() % tx_pkt_nb_segs + 1;
+ else
+ nb_seg = tx_pkt_nb_segs;
+
+ memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));
+
+ /* calculate number of segments to use and their length. */
+ len = 0;
+ for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
+ len += seglen[i];
+ md[i] = NULL;
+ }
+
+ n = pkt->pkt_len - len;
+
+ /* update size of the last segment to fit rest of the packet */
+ if (n >= 0) {
+ seglen[i - 1] += n;
+ len += n;
+ }
+
+ /* allocate segments back to front so a failure leaves md[0..i-1]
+ * unset and the cleanup loop below frees only what was obtained */
+ nb_seg = i;
+ while (i != 0) {
+ p = rte_pktmbuf_alloc(mp);
+ if (p == NULL) {
+ RTE_LOG(ERR, USER1,
+ "failed to allocate %u-th of %u mbuf "
+ "from mempool: %s\n",
+ nb_seg - i, nb_seg, mp->name);
+ break;
+ }
+
+ md[--i] = p;
+ if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
+ RTE_LOG(ERR, USER1, "mempool %s, %u-th segment: "
+ "expected seglen: %u, "
+ "actual mbuf tailroom: %u\n",
+ mp->name, i, seglen[i],
+ rte_pktmbuf_tailroom(md[i]));
+ break;
+ }
+ }
+
+ /* all mbufs successfully allocated, do copy */
+ if (i == 0) {
+ rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
+ if (rc < 0)
+ RTE_LOG(ERR, USER1,
+ "mbuf_copy_split for %p(len=%u, nb_seg=%hhu) "
+ "into %u segments failed with error code: %d\n",
+ pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);
+
+ /* figure out how many mbufs to free. */
+ i = RTE_MAX(rc, 0);
+ }
+
+ /* free unused mbufs */
+ for (; i != nb_seg; i++) {
+ rte_pktmbuf_free_seg(md[i]);
+ md[i] = NULL;
+ }
+
+ /* md[0] is NULL when allocation or copy failed */
+ return md[0];
+}
+
+/*
+ * Receive a burst of packets, and for each packet:
+ * - parse packet, and try to recognize a supported packet type (1)
+ * - if it's not a supported packet type, don't touch the packet, else:
+ * - modify the IPs in inner headers and in outer headers if any
+ * - reprocess the checksum of all supported layers. This is done in SW
+ * or HW, depending on testpmd command line configuration
+ * - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
+ * segmentation offload (this implies HW TCP checksum)
+ * Then transmit packets on the output port.
+ *
+ * (1) Supported packets are:
+ * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
+ * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
+ * UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
+ *
+ * The testpmd command line for this forward engine sets the flags
+ * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
+ * whether a checksum must be calculated in software or in hardware. The
+ * IP, UDP, TCP and SCTP flags always concern the inner layer. The
+ * OUTER_IP is only useful for tunnel packets.
+ */
+static void
+pkt_burst_checksum_forward(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_port *txp;
+ struct rte_mbuf *m, *p;
+ struct ether_hdr *eth_hdr;
+ void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+ uint16_t i;
+ uint64_t ol_flags;
+ uint16_t testpmd_ol_flags;
+ uint32_t rx_bad_ip_csum;
+ uint32_t rx_bad_l4_csum;
+ struct testpmd_offload_info info;
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t start_tsc;
+ uint64_t end_tsc;
+ uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ start_tsc = rte_rdtsc();
+#endif
+
+ /* receive a burst of packet */
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+ nb_pkt_per_burst);
+ if (unlikely(nb_rx == 0))
+ return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+ fs->rx_packets += nb_rx;
+ rx_bad_ip_csum = 0;
+ rx_bad_l4_csum = 0;
+
+ /* offload configuration is per tx port, constant for the burst */
+ txp = &ports[fs->tx_port];
+ testpmd_ol_flags = txp->tx_ol_flags;
+ memset(&info, 0, sizeof(info));
+ info.tso_segsz = txp->tso_segsz;
+
+ for (i = 0; i < nb_rx; i++) {
+
+ ol_flags = 0;
+ info.is_tunnel = 0;
+ m = pkts_burst[i];
+
+ /* Update the L3/L4 checksum error packet statistics */
+ rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
+ rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);
+
+ /* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
+ * and inner headers */
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+ &eth_hdr->d_addr);
+ ether_addr_copy(&ports[fs->tx_port].eth_addr,
+ &eth_hdr->s_addr);
+ parse_ethernet(eth_hdr, &info);
+ l3_hdr = (char *)eth_hdr + info.l2_len;
+
+ /* check if it's a supported tunnel */
+ if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
+ if (info.l4_proto == IPPROTO_UDP) {
+ struct udp_hdr *udp_hdr;
+ udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
+ info.l3_len);
+ parse_vxlan(udp_hdr, &info, m->packet_type);
+ } else if (info.l4_proto == IPPROTO_GRE) {
+ struct simple_gre_hdr *gre_hdr;
+ gre_hdr = (struct simple_gre_hdr *)
+ ((char *)l3_hdr + info.l3_len);
+ parse_gre(gre_hdr, &info);
+ } else if (info.l4_proto == IPPROTO_IPIP) {
+ void *encap_ip_hdr;
+ encap_ip_hdr = (char *)l3_hdr + info.l3_len;
+ parse_encap_ip(encap_ip_hdr, &info);
+ }
+ }
+
+ /* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
+ if (info.is_tunnel) {
+ outer_l3_hdr = l3_hdr;
+ l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
+ }
+
+ /* step 2: change all source IPs (v4 or v6) so we need
+ * to recompute the chksums even if they were correct */
+
+ change_ip_addresses(l3_hdr, info.ethertype);
+ if (info.is_tunnel == 1)
+ change_ip_addresses(outer_l3_hdr, info.outer_ethertype);
+
+ /* step 3: depending on user command line configuration,
+ * recompute checksum either in software or flag the
+ * mbuf to offload the calculation to the NIC. If TSO
+ * is configured, prepare the mbuf for TCP segmentation. */
+
+ /* process checksums of inner headers first */
+ ol_flags |= process_inner_cksums(l3_hdr, &info, testpmd_ol_flags);
+
+ /* Then process outer headers if any. Note that the software
+ * checksum will be wrong if one of the inner checksums is
+ * processed in hardware. */
+ if (info.is_tunnel == 1) {
+ ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
+ testpmd_ol_flags);
+ }
+
+ /* step 4: fill the mbuf meta data (flags and header lengths) */
+
+ if (info.is_tunnel == 1) {
+ if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
+ m->outer_l2_len = info.outer_l2_len;
+ m->outer_l3_len = info.outer_l3_len;
+ m->l2_len = info.l2_len;
+ m->l3_len = info.l3_len;
+ m->l4_len = info.l4_len;
+ }
+ else {
+ /* if there is a outer UDP cksum
+ processed in sw and the inner in hw,
+ the outer checksum will be wrong as
+ the payload will be modified by the
+ hardware */
+ m->l2_len = info.outer_l2_len +
+ info.outer_l3_len + info.l2_len;
+ m->l3_len = info.l3_len;
+ m->l4_len = info.l4_len;
+ }
+ } else {
+ /* this is only useful if an offload flag is
+ * set, but it does not hurt to fill it in any
+ * case */
+ m->l2_len = info.l2_len;
+ m->l3_len = info.l3_len;
+ m->l4_len = info.l4_len;
+ }
+ m->tso_segsz = info.tso_segsz;
+ m->ol_flags = ol_flags;
+
+ /* Do split & copy for the packet. */
+ if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
+ p = pkt_copy_split(m);
+ if (p != NULL) {
+ rte_pktmbuf_free(m);
+ m = p;
+ pkts_burst[i] = m;
+ }
+ }
+
+ /* if verbose mode is enabled, dump debug info */
+ if (verbose_level > 0) {
+ /* flag/mask pairs used to pretty-print tx offload flags */
+ struct {
+ uint64_t flag;
+ uint64_t mask;
+ } tx_flags[] = {
+ { PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM },
+ { PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK },
+ { PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK },
+ { PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK },
+ { PKT_TX_IPV4, PKT_TX_IPV4 },
+ { PKT_TX_IPV6, PKT_TX_IPV6 },
+ { PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM },
+ { PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4 },
+ { PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6 },
+ { PKT_TX_TCP_SEG, PKT_TX_TCP_SEG },
+ };
+ unsigned j;
+ const char *name;
+
+ printf("-----------------\n");
+ printf("mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
+ m, m->pkt_len, m->nb_segs);
+ /* dump rx parsed packet info */
+ printf("rx: l2_len=%d ethertype=%x l3_len=%d "
+ "l4_proto=%d l4_len=%d\n",
+ info.l2_len, rte_be_to_cpu_16(info.ethertype),
+ info.l3_len, info.l4_proto, info.l4_len);
+ if (info.is_tunnel == 1)
+ printf("rx: outer_l2_len=%d outer_ethertype=%x "
+ "outer_l3_len=%d\n", info.outer_l2_len,
+ rte_be_to_cpu_16(info.outer_ethertype),
+ info.outer_l3_len);
+ /* dump tx packet info */
+ if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
+ TESTPMD_TX_OFFLOAD_UDP_CKSUM |
+ TESTPMD_TX_OFFLOAD_TCP_CKSUM |
+ TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
+ info.tso_segsz != 0)
+ printf("tx: m->l2_len=%d m->l3_len=%d "
+ "m->l4_len=%d\n",
+ m->l2_len, m->l3_len, m->l4_len);
+ if ((info.is_tunnel == 1) &&
+ (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM))
+ printf("tx: m->outer_l2_len=%d m->outer_l3_len=%d\n",
+ m->outer_l2_len, m->outer_l3_len);
+ if (info.tso_segsz != 0)
+ printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
+ printf("tx: flags=");
+ for (j = 0; j < sizeof(tx_flags)/sizeof(*tx_flags); j++) {
+ name = rte_get_tx_ol_flag_name(tx_flags[j].flag);
+ if ((m->ol_flags & tx_flags[j].mask) ==
+ tx_flags[j].flag)
+ printf("%s ", name);
+ }
+ printf("\n");
+ }
+ }
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ fs->tx_packets += nb_tx;
+ fs->rx_bad_ip_csum += rx_bad_ip_csum;
+ fs->rx_bad_l4_csum += rx_bad_l4_csum;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+ /* free the packets the tx queue could not accept */
+ if (unlikely(nb_tx < nb_rx)) {
+ fs->fwd_dropped += (nb_rx - nb_tx);
+ do {
+ rte_pktmbuf_free(pkts_burst[nb_tx]);
+ } while (++nb_tx < nb_rx);
+ }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ end_tsc = rte_rdtsc();
+ core_cycles = (end_tsc - start_tsc);
+ fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/* forwarding engine descriptor, selected as "csum" forward mode */
+struct fwd_engine csum_fwd_engine = {
+ .fwd_mode_name = "csum",
+ .port_fwd_begin = NULL,
+ .port_fwd_end = NULL,
+ .packet_fwd = pkt_burst_checksum_forward,
+};
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
new file mode 100644
index 00000000..0f307e85
--- /dev/null
+++ b/app/test-pmd/flowgen.c
@@ -0,0 +1,248 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Tilera Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Tilera Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/* hardcoded configuration (for now) */
+static unsigned cfg_n_flows = 1024; /* number of distinct generated flows */
+static unsigned cfg_pkt_size = 300; /* wire size incl. 4-byte FCS */
+static uint32_t cfg_ip_src = IPv4(10, 254, 0, 0); /* fixed source address */
+static uint32_t cfg_ip_dst = IPv4(10, 253, 0, 0); /* base of dst range */
+static uint16_t cfg_udp_src = 1000;
+static uint16_t cfg_udp_dst = 1001;
+static struct ether_addr cfg_ether_src =
+ {{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x00 }};
+static struct ether_addr cfg_ether_dst =
+ {{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x01 }};
+
+#define IP_DEFTTL 64 /* from RFC 1340. */
+#define IP_VERSION 0x40
+#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
+#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
+
+/*
+ * Allocate a raw mbuf from the pool for transmission.
+ * NOTE(review): uses __rte_mbuf_raw_alloc(), presumably to skip the
+ * full rte_pktmbuf_alloc() reset -- the caller initializes every field
+ * it needs. May return NULL when the pool is empty (caller checks).
+ */
+static inline struct rte_mbuf *
+tx_mbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return m;
+}
+
+
+/*
+ * Compute the ones'-complement Internet checksum over hdr_len bytes
+ * (hdr_len is expected to be even, as IP headers always are).
+ */
+static inline uint16_t
+ip_sum(const unaligned_uint16_t *hdr, int hdr_len)
+{
+	uint32_t acc;
+
+	/* sum 16-bit words, folding the carry back in as we go */
+	for (acc = 0; hdr_len > 1; hdr_len -= 2) {
+		acc += *hdr++;
+		if (acc & 0x80000000)
+			acc = (acc & 0xFFFF) + (acc >> 16);
+	}
+
+	/* fold any remaining carries into the low 16 bits */
+	while (acc >> 16)
+		acc = (acc & 0xFFFF) + (acc >> 16);
+
+	return (uint16_t)~acc;
+}
+
+/*
+ * Multi-flow generation mode.
+ *
+ * We originate a bunch of flows (varying destination IP addresses), and
+ * terminate receive traffic. Received traffic is simply discarded, but we
+ * still do so in order to maintain traffic statistics.
+ */
+static void
+pkt_burst_flow_gen(struct fwd_stream *fs)
+{
+ unsigned pkt_size = cfg_pkt_size - 4; /* Adjust FCS */
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mempool *mbp;
+ struct rte_mbuf *pkt;
+ struct ether_hdr *eth_hdr;
+ struct ipv4_hdr *ip_hdr;
+ struct udp_hdr *udp_hdr;
+ uint16_t vlan_tci, vlan_tci_outer;
+ uint16_t ol_flags;
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+ uint16_t nb_pkt;
+ uint16_t i;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t start_tsc;
+ uint64_t end_tsc;
+ uint64_t core_cycles;
+#endif
+ /* NOTE(review): shared across all lcores without synchronization
+ * and backed out below on tx drops -- confirm single-lcore use. */
+ static int next_flow = 0;
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ start_tsc = rte_rdtsc();
+#endif
+
+ /* Receive a burst of packets and discard them. */
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+ nb_pkt_per_burst);
+ fs->rx_packets += nb_rx;
+
+ for (i = 0; i < nb_rx; i++)
+ rte_pktmbuf_free(pkts_burst[i]);
+
+ mbp = current_fwd_lcore()->mbp;
+ vlan_tci = ports[fs->tx_port].tx_vlan_id;
+ vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
+ /* NOTE(review): tx_ol_flags holds TESTPMD_TX_OFFLOAD_* bits but is
+ * assigned to pkt->ol_flags (PKT_TX_* domain) below -- verify. */
+ ol_flags = ports[fs->tx_port].tx_ol_flags;
+
+ /* Build and send one burst; each packet gets the next flow's
+ * destination address. */
+ for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+ pkt = tx_mbuf_alloc(mbp);
+ if (!pkt)
+ break;
+
+ pkt->data_len = pkt_size;
+ pkt->next = NULL;
+
+ /* Initialize Ethernet header. */
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
+ ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
+ eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+
+ /* Initialize IP header. */
+ ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ memset(ip_hdr, 0, sizeof(*ip_hdr));
+ ip_hdr->version_ihl = IP_VHL_DEF;
+ ip_hdr->type_of_service = 0;
+ ip_hdr->fragment_offset = 0;
+ ip_hdr->time_to_live = IP_DEFTTL;
+ ip_hdr->next_proto_id = IPPROTO_UDP;
+ ip_hdr->packet_id = 0;
+ ip_hdr->src_addr = rte_cpu_to_be_32(cfg_ip_src);
+ ip_hdr->dst_addr = rte_cpu_to_be_32(cfg_ip_dst +
+ next_flow);
+ ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_size -
+ sizeof(*eth_hdr));
+ ip_hdr->hdr_checksum = ip_sum((unaligned_uint16_t *)ip_hdr,
+ sizeof(*ip_hdr));
+
+ /* Initialize UDP header. */
+ udp_hdr = (struct udp_hdr *)(ip_hdr + 1);
+ udp_hdr->src_port = rte_cpu_to_be_16(cfg_udp_src);
+ udp_hdr->dst_port = rte_cpu_to_be_16(cfg_udp_dst);
+ udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
+ udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size -
+ sizeof(*eth_hdr) -
+ sizeof(*ip_hdr));
+ pkt->nb_segs = 1;
+ pkt->pkt_len = pkt_size;
+ pkt->ol_flags = ol_flags;
+ pkt->vlan_tci = vlan_tci;
+ pkt->vlan_tci_outer = vlan_tci_outer;
+ pkt->l2_len = sizeof(struct ether_hdr);
+ pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkts_burst[nb_pkt] = pkt;
+
+ next_flow = (next_flow + 1) % cfg_n_flows;
+ }
+
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+ fs->tx_packets += nb_tx;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+ if (unlikely(nb_tx < nb_pkt)) {
+ /* Back out the flow counter. */
+ next_flow -= (nb_pkt - nb_tx);
+ while (next_flow < 0)
+ next_flow += cfg_n_flows;
+
+ do {
+ rte_pktmbuf_free(pkts_burst[nb_tx]);
+ } while (++nb_tx < nb_pkt);
+ }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ end_tsc = rte_rdtsc();
+ core_cycles = (end_tsc - start_tsc);
+ fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/* forwarding engine descriptor, selected as "flowgen" forward mode */
+struct fwd_engine flow_gen_engine = {
+ .fwd_mode_name = "flowgen",
+ .port_fwd_begin = NULL,
+ .port_fwd_end = NULL,
+ .packet_fwd = pkt_burst_flow_gen,
+};
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
new file mode 100644
index 00000000..e510f9bf
--- /dev/null
+++ b/app/test-pmd/icmpecho.c
@@ -0,0 +1,542 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 6WIND
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_arp.h>
+#include <rte_ip.h>
+#include <rte_icmp.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/*
+ * Return a human-readable name for an ARP operation code.
+ *
+ * @param arp_op ARP opcode in host byte order (ARP_OP_* from rte_arp.h).
+ * @return pointer to a static string; never NULL.
+ */
+static const char *
+arp_op_name(uint16_t arp_op)
+{
+	switch (arp_op) {
+	case ARP_OP_REQUEST:
+		return "ARP Request";
+	case ARP_OP_REPLY:
+		return "ARP Reply";
+	case ARP_OP_REVREQUEST:
+		return "Reverse ARP Request";
+	case ARP_OP_REVREPLY:
+		return "Reverse ARP Reply";
+	case ARP_OP_INVREQUEST:
+		return "Peer Identify Request";
+	case ARP_OP_INVREPLY:
+		return "Peer Identify Reply";
+	default:
+		break;
+	}
+	/* Fixed typo in fallback message ("Unkwown" -> "Unknown"). */
+	return "Unknown ARP op";
+}
+
+/*
+ * Map an IPv4 protocol number to a short protocol name for display.
+ *
+ * Protocol numbers smaller than the table size are resolved through a
+ * dense lookup table indexed by protocol number; a few higher values
+ * (some only defined on certain platforms) are handled by the switch
+ * below.  Returns "UNASSIGNED" for everything else.
+ */
+static const char *
+ip_proto_name(uint16_t ip_proto)
+{
+	static const char * ip_proto_names[] = {
+		"IP6HOPOPTS", /**< IP6 hop-by-hop options */
+		"ICMP",       /**< control message protocol */
+		"IGMP",       /**< group mgmt protocol */
+		"GGP",        /**< gateway^2 (deprecated) */
+		"IPv4",       /**< IPv4 encapsulation */
+
+		"UNASSIGNED",
+		"TCP",        /**< transport control protocol */
+		"ST",         /**< Stream protocol II */
+		"EGP",        /**< exterior gateway protocol */
+		"PIGP",       /**< private interior gateway */
+
+		"RCC_MON",    /**< BBN RCC Monitoring */
+		"NVPII",      /**< network voice protocol*/
+		"PUP",        /**< pup */
+		"ARGUS",      /**< Argus */
+		"EMCON",      /**< EMCON */
+
+		"XNET",       /**< Cross Net Debugger */
+		"CHAOS",      /**< Chaos*/
+		"UDP",        /**< user datagram protocol */
+		"MUX",        /**< Multiplexing */
+		"DCN_MEAS",   /**< DCN Measurement Subsystems */
+
+		"HMP",        /**< Host Monitoring */
+		"PRM",        /**< Packet Radio Measurement */
+		"XNS_IDP",    /**< xns idp */
+		"TRUNK1",     /**< Trunk-1 */
+		"TRUNK2",     /**< Trunk-2 */
+
+		"LEAF1",      /**< Leaf-1 */
+		"LEAF2",      /**< Leaf-2 */
+		"RDP",        /**< Reliable Data */
+		"IRTP",       /**< Reliable Transaction */
+		"TP4",        /**< tp-4 w/ class negotiation */
+
+		"BLT",        /**< Bulk Data Transfer */
+		"NSP",        /**< Network Services */
+		"INP",        /**< Merit Internodal */
+		"SEP",        /**< Sequential Exchange */
+		"3PC",        /**< Third Party Connect */
+
+		"IDPR",       /**< InterDomain Policy Routing */
+		"XTP",        /**< XTP */
+		"DDP",        /**< Datagram Delivery */
+		"CMTP",       /**< Control Message Transport */
+		"TPXX",       /**< TP++ Transport */
+
+		"ILTP",       /**< IL transport protocol */
+		"IPv6_HDR",   /**< IP6 header */
+		"SDRP",       /**< Source Demand Routing */
+		"IPv6_RTG",   /**< IP6 routing header */
+		"IPv6_FRAG",  /**< IP6 fragmentation header */
+
+		"IDRP",       /**< InterDomain Routing*/
+		"RSVP",       /**< resource reservation */
+		"GRE",        /**< General Routing Encap. */
+		"MHRP",       /**< Mobile Host Routing */
+		"BHA",        /**< BHA */
+
+		"ESP",        /**< IP6 Encap Sec. Payload */
+		"AH",         /**< IP6 Auth Header */
+		"INLSP",      /**< Integ. Net Layer Security */
+		"SWIPE",      /**< IP with encryption */
+		"NHRP",       /**< Next Hop Resolution */
+
+		"UNASSIGNED",
+		"UNASSIGNED",
+		"UNASSIGNED",
+		"ICMPv6",     /**< ICMP6 */
+		"IPv6NONEXT", /**< IP6 no next header */
+
+		"Ipv6DSTOPTS",/**< IP6 destination option */
+		"AHIP",       /**< any host internal protocol */
+		"CFTP",       /**< CFTP */
+		"HELLO",      /**< "hello" routing protocol */
+		"SATEXPAK",   /**< SATNET/Backroom EXPAK */
+
+		"KRYPTOLAN",  /**< Kryptolan */
+		"RVD",        /**< Remote Virtual Disk */
+		"IPPC",       /**< Pluribus Packet Core */
+		"ADFS",       /**< Any distributed FS */
+		"SATMON",     /**< Satnet Monitoring */
+
+		"VISA",       /**< VISA Protocol */
+		"IPCV",       /**< Packet Core Utility */
+		"CPNX",       /**< Comp. Prot. Net. Executive */
+		"CPHB",       /**< Comp. Prot. HeartBeat */
+		"WSN",        /**< Wang Span Network */
+
+		"PVP",        /**< Packet Video Protocol */
+		"BRSATMON",   /**< BackRoom SATNET Monitoring */
+		"ND",         /**< Sun net disk proto (temp.) */
+		"WBMON",      /**< WIDEBAND Monitoring */
+		"WBEXPAK",    /**< WIDEBAND EXPAK */
+
+		"EON",        /**< ISO cnlp */
+		"VMTP",       /**< VMTP */
+		"SVMTP",      /**< Secure VMTP */
+		"VINES",      /**< Banyon VINES */
+		"TTP",        /**< TTP */
+
+		"IGP",        /**< NSFNET-IGP */
+		"DGP",        /**< dissimilar gateway prot. */
+		"TCF",        /**< TCF */
+		"IGRP",       /**< Cisco/GXS IGRP */
+		"OSPFIGP",    /**< OSPFIGP */
+
+		"SRPC",       /**< Strite RPC protocol */
+		"LARP",       /**< Locus Address Resoloution */
+		"MTP",        /**< Multicast Transport */
+		"AX25",       /**< AX.25 Frames */
+		"4IN4",       /**< IP encapsulated in IP */
+
+		"MICP",       /**< Mobile Int.ing control */
+		"SCCSP",      /**< Semaphore Comm. security */
+		"ETHERIP",    /**< Ethernet IP encapsulation */
+		"ENCAP",      /**< encapsulation header */
+		"AES",        /**< any private encr. scheme */
+
+		"GMTP",       /**< GMTP */
+		"IPCOMP",     /**< payload compression (IPComp) */
+		"UNASSIGNED",
+		"UNASSIGNED",
+		"PIM",        /**< Protocol Independent Mcast */
+	};
+
+	/* Dense range: direct table lookup by protocol number. */
+	if (ip_proto < sizeof(ip_proto_names) / sizeof(ip_proto_names[0]))
+		return ip_proto_names[ip_proto];
+	/* Sparse, possibly platform-specific values above the table. */
+	switch (ip_proto) {
+#ifdef IPPROTO_PGM
+	case IPPROTO_PGM:  /**< PGM */
+		return "PGM";
+#endif
+	case IPPROTO_SCTP:  /**< Stream Control Transport Protocol */
+		return "SCTP";
+#ifdef IPPROTO_DIVERT
+	case IPPROTO_DIVERT: /**< divert pseudo-protocol */
+		return "DIVERT";
+#endif
+	case IPPROTO_RAW: /**< raw IP packet */
+		return "RAW";
+	default:
+		break;
+	}
+	return "UNASSIGNED";
+}
+
+/*
+ * Format an IPv4 address (network byte order) as dotted-decimal text.
+ *
+ * @param be_ipv4_addr address in big-endian (network) byte order.
+ * @param buf          output buffer; must hold at least 16 bytes
+ *                     ("255.255.255.255" plus NUL) — both callers in
+ *                     this file pass a char[16].
+ */
+static void
+ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf)
+{
+	uint32_t ipv4_addr;
+
+	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
+	/* Bounded write; "%u" matches the unsigned operands. */
+	snprintf(buf, 16, "%u.%u.%u.%u", (ipv4_addr >> 24) & 0xFF,
+		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
+		ipv4_addr & 0xFF);
+}
+
+/*
+ * Print an Ethernet address, optionally preceded by a caption string.
+ * A NULL caption prints the address alone.
+ */
+static void
+ether_addr_dump(const char *what, const struct ether_addr *ea)
+{
+	char addr_str[ETHER_ADDR_FMT_SIZE];
+
+	ether_format_addr(addr_str, ETHER_ADDR_FMT_SIZE, ea);
+	if (what != NULL)
+		fputs(what, stdout);
+	fputs(addr_str, stdout);
+}
+
+/*
+ * Print an IPv4 address (network byte order) in dotted-decimal form,
+ * optionally preceded by a caption string.
+ */
+static void
+ipv4_addr_dump(const char *what, uint32_t be_ipv4_addr)
+{
+	char addr_str[16];
+
+	ipv4_addr_to_dot(be_ipv4_addr, addr_str);
+	if (what != NULL)
+		fputs(what, stdout);
+	fputs(addr_str, stdout);
+}
+
+/*
+ * Compute the checksum of a 20-byte (option-less) IPv4 header.
+ *
+ * Sums the header as ten 16-bit words, skipping word 5 (the checksum
+ * field itself), folds the carries, and returns the one's complement.
+ * Returns 0xFFFF instead of 0 per RFC 1624 conventions.
+ */
+static uint16_t
+ipv4_hdr_cksum(struct ipv4_hdr *ip_h)
+{
+	/*
+	 * Declared with the unaligned type actually used below: the header
+	 * may sit at an odd offset inside the mbuf, and the original
+	 * "uint16_t *" declaration contradicted the cast on assignment.
+	 */
+	unaligned_uint16_t *v16_h;
+	uint32_t ip_cksum;
+
+	/*
+	 * Compute the sum of successive 16-bit words of the IPv4 header,
+	 * skipping the checksum field of the header (v16_h[5]).
+	 */
+	v16_h = (unaligned_uint16_t *) ip_h;
+	ip_cksum = v16_h[0] + v16_h[1] + v16_h[2] + v16_h[3] +
+		v16_h[4] + v16_h[6] + v16_h[7] + v16_h[8] + v16_h[9];
+
+	/* reduce 32 bit checksum to 16 bits and complement it */
+	ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16);
+	ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16);
+	ip_cksum = (~ip_cksum) & 0x0000FFFF;
+	return (ip_cksum == 0) ? 0xFFFF : (uint16_t) ip_cksum;
+}
+
+/*
+ * True when the first octet of the (big-endian) IPv4 address is exactly
+ * 0xE0 (224.x.x.x).  NOTE(review): the full IPv4 multicast range is
+ * 224.0.0.0/4 (first octet 0xE0-0xEF); this test is narrower — confirm
+ * whether that is intentional before relying on it elsewhere.
+ */
+#define is_multicast_ipv4_addr(ipv4_addr) \
+	(((rte_be_to_cpu_32((ipv4_addr)) >> 24) & 0x000000FF) == 0xE0)
+
+/*
+ * Receive a burst of packets, look for ICMP echo requests, and, if any,
+ * send back ICMP echo replies.
+ */
+/*
+ * Poll one RX queue, answer ARP requests and ICMPv4 echo requests in
+ * place inside the received mbufs, and transmit the replies.  Any other
+ * packet is freed.  TX shortfalls free the unsent replies and are
+ * counted in fs->fwd_dropped.
+ */
+static void
+reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
+{
+	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_mbuf *pkt;
+	struct ether_hdr *eth_h;
+	struct vlan_hdr *vlan_h;
+	struct arp_hdr *arp_h;
+	struct ipv4_hdr *ip_h;
+	struct icmp_hdr *icmp_h;
+	struct ether_addr eth_addr;
+	uint32_t ip_addr;
+	uint16_t nb_rx;
+	uint16_t nb_tx;
+	uint16_t nb_replies;
+	uint16_t eth_type;
+	uint16_t vlan_id;
+	uint16_t arp_op;
+	uint16_t arp_pro;
+	uint32_t cksum;
+	/*
+	 * uint16_t, not uint8_t: nb_rx is bounded by nb_pkt_per_burst,
+	 * which can exceed 255, and a narrower index would wrap and loop
+	 * forever over the burst.
+	 */
+	uint16_t i;
+	int l2_len;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	uint64_t start_tsc;
+	uint64_t end_tsc;
+	uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	start_tsc = rte_rdtsc();
+#endif
+
+	/*
+	 * First, receive a burst of packets.
+	 */
+	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+				 nb_pkt_per_burst);
+	if (unlikely(nb_rx == 0))
+		return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+	fs->rx_packets += nb_rx;
+	nb_replies = 0;
+	for (i = 0; i < nb_rx; i++) {
+		pkt = pkts_burst[i];
+		eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+		eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type);
+		l2_len = sizeof(struct ether_hdr);
+		if (verbose_level > 0) {
+			printf("\nPort %d pkt-len=%u nb-segs=%u\n",
+			       fs->rx_port, pkt->pkt_len, pkt->nb_segs);
+			ether_addr_dump("  ETH:  src=", &eth_h->s_addr);
+			ether_addr_dump(" dst=", &eth_h->d_addr);
+		}
+		/* Peel off a single VLAN tag, if present. */
+		if (eth_type == ETHER_TYPE_VLAN) {
+			vlan_h = (struct vlan_hdr *)
+				((char *)eth_h + sizeof(struct ether_hdr));
+			l2_len  += sizeof(struct vlan_hdr);
+			eth_type = rte_be_to_cpu_16(vlan_h->eth_proto);
+			if (verbose_level > 0) {
+				vlan_id = rte_be_to_cpu_16(vlan_h->vlan_tci)
+					& 0xFFF;
+				printf(" [vlan id=%u]", vlan_id);
+			}
+		}
+		if (verbose_level > 0) {
+			printf(" type=0x%04x\n", eth_type);
+		}
+
+		/* Reply to ARP requests */
+		if (eth_type == ETHER_TYPE_ARP) {
+			arp_h = (struct arp_hdr *) ((char *)eth_h + l2_len);
+			arp_op = RTE_BE_TO_CPU_16(arp_h->arp_op);
+			arp_pro = RTE_BE_TO_CPU_16(arp_h->arp_pro);
+			if (verbose_level > 0) {
+				printf("  ARP:  hrd=%d proto=0x%04x hln=%d "
+				       "pln=%d op=%u (%s)\n",
+				       RTE_BE_TO_CPU_16(arp_h->arp_hrd),
+				       arp_pro, arp_h->arp_hln,
+				       arp_h->arp_pln, arp_op,
+				       arp_op_name(arp_op));
+			}
+			/* Only Ethernet/IPv4 ARP with standard lengths. */
+			if ((RTE_BE_TO_CPU_16(arp_h->arp_hrd) !=
+			     ARP_HRD_ETHER) ||
+			    (arp_pro != ETHER_TYPE_IPv4) ||
+			    (arp_h->arp_hln != 6) ||
+			    (arp_h->arp_pln != 4)
+			    ) {
+				rte_pktmbuf_free(pkt);
+				if (verbose_level > 0)
+					printf("\n");
+				continue;
+			}
+			if (verbose_level > 0) {
+				ether_addr_copy(&arp_h->arp_data.arp_sha, &eth_addr);
+				ether_addr_dump("        sha=", &eth_addr);
+				ip_addr = arp_h->arp_data.arp_sip;
+				ipv4_addr_dump(" sip=", ip_addr);
+				printf("\n");
+				ether_addr_copy(&arp_h->arp_data.arp_tha, &eth_addr);
+				ether_addr_dump("        tha=", &eth_addr);
+				ip_addr = arp_h->arp_data.arp_tip;
+				ipv4_addr_dump(" tip=", ip_addr);
+				printf("\n");
+			}
+			if (arp_op != ARP_OP_REQUEST) {
+				rte_pktmbuf_free(pkt);
+				continue;
+			}
+
+			/*
+			 * Build ARP reply.
+			 */
+
+			/* Use source MAC address as destination MAC address. */
+			ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+			/* Set source MAC address with MAC address of TX port */
+			ether_addr_copy(&ports[fs->tx_port].eth_addr,
+					&eth_h->s_addr);
+
+			arp_h->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+			ether_addr_copy(&arp_h->arp_data.arp_tha, &eth_addr);
+			ether_addr_copy(&arp_h->arp_data.arp_sha, &arp_h->arp_data.arp_tha);
+			ether_addr_copy(&eth_h->s_addr, &arp_h->arp_data.arp_sha);
+
+			/* Swap IP addresses in ARP payload */
+			ip_addr = arp_h->arp_data.arp_sip;
+			arp_h->arp_data.arp_sip = arp_h->arp_data.arp_tip;
+			arp_h->arp_data.arp_tip = ip_addr;
+			pkts_burst[nb_replies++] = pkt;
+			continue;
+		}
+
+		if (eth_type != ETHER_TYPE_IPv4) {
+			rte_pktmbuf_free(pkt);
+			continue;
+		}
+		ip_h = (struct ipv4_hdr *) ((char *)eth_h + l2_len);
+		if (verbose_level > 0) {
+			ipv4_addr_dump("  IPV4: src=", ip_h->src_addr);
+			ipv4_addr_dump(" dst=", ip_h->dst_addr);
+			printf(" proto=%d (%s)\n",
+			       ip_h->next_proto_id,
+			       ip_proto_name(ip_h->next_proto_id));
+		}
+
+		/*
+		 * Check if packet is a ICMP echo request.
+		 */
+		icmp_h = (struct icmp_hdr *) ((char *)ip_h +
+					      sizeof(struct ipv4_hdr));
+		if (! ((ip_h->next_proto_id == IPPROTO_ICMP) &&
+		       (icmp_h->icmp_type == IP_ICMP_ECHO_REQUEST) &&
+		       (icmp_h->icmp_code == 0))) {
+			rte_pktmbuf_free(pkt);
+			continue;
+		}
+
+		if (verbose_level > 0)
+			printf("  ICMP: echo request seq id=%d\n",
+			       rte_be_to_cpu_16(icmp_h->icmp_seq_nb));
+
+		/*
+		 * Prepare ICMP echo reply to be sent back.
+		 * - switch ethernet source and destinations addresses,
+		 * - use the request IP source address as the reply IP
+		 *    destination address,
+		 * - if the request IP destination address is a multicast
+		 *   address:
+		 *     - choose a reply IP source address different from the
+		 *       request IP source address,
+		 *     - re-compute the IP header checksum.
+		 *   Otherwise:
+		 *     - switch the request IP source and destination
+		 *       addresses in the reply IP header,
+		 *     - keep the IP header checksum unchanged.
+		 * - set IP_ICMP_ECHO_REPLY in ICMP header.
+		 * ICMP checksum is computed by assuming it is valid in the
+		 * echo request and not verified.
+		 */
+		ether_addr_copy(&eth_h->s_addr, &eth_addr);
+		ether_addr_copy(&eth_h->d_addr, &eth_h->s_addr);
+		ether_addr_copy(&eth_addr, &eth_h->d_addr);
+		ip_addr = ip_h->src_addr;
+		if (is_multicast_ipv4_addr(ip_h->dst_addr)) {
+			uint32_t ip_src;
+
+			ip_src = rte_be_to_cpu_32(ip_addr);
+			if ((ip_src & 0x00000003) == 1)
+				ip_src = (ip_src & 0xFFFFFFFC) | 0x00000002;
+			else
+				ip_src = (ip_src & 0xFFFFFFFC) | 0x00000001;
+			ip_h->src_addr = rte_cpu_to_be_32(ip_src);
+			ip_h->dst_addr = ip_addr;
+			ip_h->hdr_checksum = ipv4_hdr_cksum(ip_h);
+		} else {
+			ip_h->src_addr = ip_h->dst_addr;
+			ip_h->dst_addr = ip_addr;
+		}
+		/* Incrementally patch the ICMP checksum (RFC 1624 style). */
+		icmp_h->icmp_type = IP_ICMP_ECHO_REPLY;
+		cksum = ~icmp_h->icmp_cksum & 0xffff;
+		cksum += ~htons(IP_ICMP_ECHO_REQUEST << 8) & 0xffff;
+		cksum += htons(IP_ICMP_ECHO_REPLY << 8);
+		cksum = (cksum & 0xffff) + (cksum >> 16);
+		cksum = (cksum & 0xffff) + (cksum >> 16);
+		icmp_h->icmp_cksum = ~cksum;
+		pkts_burst[nb_replies++] = pkt;
+	}
+
+	/* Send back ICMP echo replies, if any. */
+	if (nb_replies > 0) {
+		nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+					 nb_replies);
+		fs->tx_packets += nb_tx;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+		fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+		if (unlikely(nb_tx < nb_replies)) {
+			fs->fwd_dropped += (nb_replies - nb_tx);
+			do {
+				rte_pktmbuf_free(pkts_burst[nb_tx]);
+			} while (++nb_tx < nb_replies);
+		}
+	}
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	end_tsc = rte_rdtsc();
+	core_cycles = (end_tsc - start_tsc);
+	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/* Forwarding-engine descriptor; selected in testpmd with "set fwd icmpecho". */
+struct fwd_engine icmp_echo_engine = {
+	.fwd_mode_name  = "icmpecho",
+	.port_fwd_begin = NULL,
+	.port_fwd_end   = NULL,
+	.packet_fwd     = reply_to_icmp_echo_rqsts,
+};
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
new file mode 100644
index 00000000..0d3b37a7
--- /dev/null
+++ b/app/test-pmd/ieee1588fwd.c
@@ -0,0 +1,248 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+
+#include "testpmd.h"
+
+/**
+ * The structure of a PTP V2 packet.
+ *
+ * Only the minimum fields used by the ieee1588 test are represented.
+ */
+struct ptpv2_msg {
+	uint8_t msg_id;     /**< message type; low nibble, PTP_*_MESSAGE */
+	uint8_t version;    /**< must be 0x02 */
+	uint8_t unused[34]; /**< remainder of the PTP header, not inspected */
+};
+
+/* PTP V2 message type identifiers (value of ptpv2_msg.msg_id). */
+#define PTP_SYNC_MESSAGE                0x0
+#define PTP_DELAY_REQ_MESSAGE           0x1
+#define PTP_PATH_DELAY_REQ_MESSAGE      0x2
+#define PTP_PATH_DELAY_RESP_MESSAGE     0x3
+#define PTP_FOLLOWUP_MESSAGE            0x8
+#define PTP_DELAY_RESP_MESSAGE          0x9
+#define PTP_PATH_DELAY_FOLLOWUP_MESSAGE 0xA
+#define PTP_ANNOUNCE_MESSAGE            0xB
+#define PTP_SIGNALLING_MESSAGE          0xC
+#define PTP_MANAGEMENT_MESSAGE          0xD
+
+/*
+ * Forwarding of IEEE1588 Precise Time Protocol (PTP) packets.
+ *
+ * In this mode, packets are received one by one and are expected to be
+ * PTP V2 L2 Ethernet frames (with the specific Ethernet type "0x88F7")
+ * containing PTP "sync" messages (version 2 at offset 1, and message ID
+ * 0 at offset 0).
+ *
+ * Check that each received packet is a IEEE1588 PTP V2 packet of type
+ * PTP_SYNC_MESSAGE, and that it has been identified and timestamped
+ * by the hardware.
+ * Check that the value of the last RX timestamp recorded by the controller
+ * is greater than the previous one.
+ *
+ * If everything is OK, send the received packet back on the same port,
+ * requesting for it to be timestamped by the hardware.
+ * Check that the value of the last TX timestamp recorded by the controller
+ * is greater than the previous one.
+ */
+
+/*
+ * Read and display the RX timestamp latched by the port for the given
+ * timesync register index; warn if the registers hold no valid value.
+ */
+static void
+port_ieee1588_rx_timestamp_check(portid_t pi, uint32_t index)
+{
+	struct timespec timestamp = {0, 0};
+
+	if (rte_eth_timesync_read_rx_timestamp(pi, &timestamp, index) < 0) {
+		printf("Port %u RX timestamp registers not valid\n",
+		       (unsigned) pi);
+		return;
+	}
+	/*
+	 * tv_sec is time_t and tv_nsec is long; cast explicitly so the
+	 * "%lu" conversions match the argument types on every ABI
+	 * (passing a 64-bit time_t for "%lu" on 32-bit targets is
+	 * undefined behavior).
+	 */
+	printf("Port %u RX timestamp value %lu s %lu ns\n",
+	       (unsigned) pi, (unsigned long) timestamp.tv_sec,
+	       (unsigned long) timestamp.tv_nsec);
+}
+
+#define MAX_TX_TMST_WAIT_MICROSECS 1000 /**< 1 milli-second */
+
+/*
+ * Poll for the TX timestamp of the last transmitted PTP packet, waiting
+ * up to MAX_TX_TMST_WAIT_MICROSECS in 1 us steps, and display it.
+ */
+static void
+port_ieee1588_tx_timestamp_check(portid_t pi)
+{
+	struct timespec timestamp = {0, 0};
+	unsigned wait_us = 0;
+
+	while ((rte_eth_timesync_read_tx_timestamp(pi, &timestamp) < 0) &&
+	       (wait_us < MAX_TX_TMST_WAIT_MICROSECS)) {
+		rte_delay_us(1);
+		wait_us++;
+	}
+	if (wait_us >= MAX_TX_TMST_WAIT_MICROSECS) {
+		printf("Port %u TX timestamp registers not valid after "
+		       "%u micro-seconds\n",
+		       (unsigned) pi, (unsigned) MAX_TX_TMST_WAIT_MICROSECS);
+		return;
+	}
+	/*
+	 * Cast tv_sec/tv_nsec so the "%lu" conversions match on every ABI
+	 * (time_t is not necessarily unsigned long).
+	 */
+	printf("Port %u TX timestamp value %lu s %lu ns validated after "
+	       "%u micro-second%s\n",
+	       (unsigned) pi, (unsigned long) timestamp.tv_sec,
+	       (unsigned long) timestamp.tv_nsec, wait_us,
+	       (wait_us == 1) ? "" : "s");
+}
+
+/*
+ * Receive one packet, validate that it is a hardware-detected and
+ * hardware-timestamped PTP V2 SYNC frame, check the RX timestamp,
+ * swap the MAC addresses and send it back requesting a TX timestamp,
+ * then check that timestamp too.  Invalid packets are freed with a
+ * diagnostic.
+ */
+static void
+ieee1588_packet_fwd(struct fwd_stream *fs)
+{
+	struct rte_mbuf  *mb;
+	struct ether_hdr *eth_hdr;
+	struct ether_addr addr;
+	struct ptpv2_msg *ptp_hdr;
+	uint16_t eth_type;
+	uint32_t timesync_index;
+
+	/*
+	 * Receive 1 packet at a time.
+	 */
+	if (rte_eth_rx_burst(fs->rx_port, fs->rx_queue, &mb, 1) == 0)
+		return;
+
+	fs->rx_packets += 1;
+
+	/*
+	 * Check that the received packet is a PTP packet that was detected
+	 * by the hardware.
+	 */
+	eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+	eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
+		if (eth_type == ETHER_TYPE_1588) {
+			printf("Port %u Received PTP packet not filtered"
+			       " by hardware\n",
+			       (unsigned) fs->rx_port);
+		} else {
+			/* "%04x": zero-padded hex, was "%4x" (space-padded) */
+			printf("Port %u Received non PTP packet type=0x%04x "
+			       "len=%u\n",
+			       (unsigned) fs->rx_port, eth_type,
+			       (unsigned) mb->pkt_len);
+		}
+		rte_pktmbuf_free(mb);
+		return;
+	}
+	if (eth_type != ETHER_TYPE_1588) {
+		printf("Port %u Received NON PTP packet incorrectly"
+		       " detected by hardware\n",
+		       (unsigned) fs->rx_port);
+		rte_pktmbuf_free(mb);
+		return;
+	}
+
+	/*
+	 * Check that the received PTP packet is a PTP V2 packet of type
+	 * PTP_SYNC_MESSAGE.
+	 */
+	ptp_hdr = (struct ptpv2_msg *) (rte_pktmbuf_mtod(mb, char *) +
+					sizeof(struct ether_hdr));
+	if (ptp_hdr->version != 0x02) {
+		printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
+		       " protocol version 0x%x (should be 0x02)\n",
+		       (unsigned) fs->rx_port, ptp_hdr->version);
+		rte_pktmbuf_free(mb);
+		return;
+	}
+	if (ptp_hdr->msg_id != PTP_SYNC_MESSAGE) {
+		printf("Port %u Received PTP V2 Ethernet frame with unexpected"
+		       " message ID 0x%x (expected 0x0 - PTP_SYNC_MESSAGE)\n",
+		       (unsigned) fs->rx_port, ptp_hdr->msg_id);
+		rte_pktmbuf_free(mb);
+		return;
+	}
+	printf("Port %u IEEE1588 PTP V2 SYNC Message filtered by hardware\n",
+	       (unsigned) fs->rx_port);
+
+	/*
+	 * Check that the received PTP packet has been timestamped by the
+	 * hardware.
+	 */
+	if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) {
+		printf("Port %u Received PTP packet not timestamped"
+		       " by hardware\n",
+		       (unsigned) fs->rx_port);
+		rte_pktmbuf_free(mb);
+		return;
+	}
+
+	/* For i40e we need the timesync register index. It is ignored for the
+	 * other PMDs. */
+	timesync_index = mb->timesync & 0x3;
+	/* Read and check the RX timestamp. */
+	port_ieee1588_rx_timestamp_check(fs->rx_port, timesync_index);
+
+	/* Swap dest and src mac addresses. */
+	ether_addr_copy(&eth_hdr->d_addr, &addr);
+	ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
+	ether_addr_copy(&addr, &eth_hdr->s_addr);
+
+	/* Forward PTP packet with hardware TX timestamp */
+	mb->ol_flags |= PKT_TX_IEEE1588_TMST;
+	fs->tx_packets += 1;
+	if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) {
+		printf("Port %u sent PTP packet dropped\n",
+		       (unsigned) fs->rx_port);
+		fs->fwd_dropped += 1;
+		rte_pktmbuf_free(mb);
+		return;
+	}
+
+	/*
+	 * Check the TX timestamp.
+	 */
+	port_ieee1588_tx_timestamp_check(fs->rx_port);
+}
+
+/* Engine start hook: enable IEEE1588 timesync support on the port. */
+static void
+port_ieee1588_fwd_begin(portid_t pi)
+{
+	rte_eth_timesync_enable(pi);
+}
+
+/* Engine stop hook: disable IEEE1588 timesync support on the port. */
+static void
+port_ieee1588_fwd_end(portid_t pi)
+{
+	rte_eth_timesync_disable(pi);
+}
+
+/* Forwarding-engine descriptor; selected in testpmd with "set fwd ieee1588". */
+struct fwd_engine ieee1588_fwd_engine = {
+	.fwd_mode_name  = "ieee1588",
+	.port_fwd_begin = port_ieee1588_fwd_begin,
+	.port_fwd_end   = port_ieee1588_fwd_end,
+	.packet_fwd     = ieee1588_packet_fwd,
+};
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
new file mode 100644
index 00000000..8840d868
--- /dev/null
+++ b/app/test-pmd/iofwd.c
@@ -0,0 +1,128 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/*
+ * Forwarding of packets in I/O mode.
+ * Forward packets "as-is".
+ * This is the fastest possible forwarding operation, as it does not
+ * access packet data.
+ */
+static void
+pkt_burst_io_forward(struct fwd_stream *fs)
+{
+	struct rte_mbuf *burst[MAX_PKT_BURST];
+	uint16_t rx_cnt;
+	uint16_t tx_cnt;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	uint64_t start_tsc;
+	uint64_t end_tsc;
+	uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	start_tsc = rte_rdtsc();
+#endif
+
+	/* Pull one burst from the RX queue; nothing to do on an empty poll. */
+	rx_cnt = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, burst,
+				  nb_pkt_per_burst);
+	if (unlikely(rx_cnt == 0))
+		return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+	fs->rx_burst_stats.pkt_burst_spread[rx_cnt]++;
+#endif
+	fs->rx_packets += rx_cnt;
+
+	/* Retransmit the same mbufs, untouched, on the paired TX queue. */
+	tx_cnt = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, burst, rx_cnt);
+	fs->tx_packets += tx_cnt;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+	fs->tx_burst_stats.pkt_burst_spread[tx_cnt]++;
+#endif
+	if (unlikely(tx_cnt < rx_cnt)) {
+		/* Account for and free whatever the TX queue refused. */
+		fs->fwd_dropped += (rx_cnt - tx_cnt);
+		while (tx_cnt < rx_cnt)
+			rte_pktmbuf_free(burst[tx_cnt++]);
+	}
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	end_tsc = rte_rdtsc();
+	core_cycles = (end_tsc - start_tsc);
+	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/* Forwarding-engine descriptor; "io" is testpmd's default forwarding mode. */
+struct fwd_engine io_fwd_engine = {
+	.fwd_mode_name  = "io",
+	.port_fwd_begin = NULL,
+	.port_fwd_end   = NULL,
+	.packet_fwd     = pkt_burst_io_forward,
+};
diff --git a/app/test-pmd/macfwd-retry.c b/app/test-pmd/macfwd-retry.c
new file mode 100644
index 00000000..3a96b3df
--- /dev/null
+++ b/app/test-pmd/macfwd-retry.c
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+#define BURST_TX_WAIT_US 10
+#define BURST_TX_RETRIES 5
+
+/*
+ * Global variables that control the number of retries and the
+ * timeout (in us) between retries.
+ */
+uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
+uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
+
+/*
+ * Forwarding of packets in MAC mode with a wait and retry on TX to reduce
+ * packet loss.
+ * Change the source and the destination Ethernet addresses of packets
+ * before forwarding them.
+ */
+static void
+pkt_burst_mac_retry_forward(struct fwd_stream *fs)
+{
+	struct rte_mbuf *burst[MAX_PKT_BURST];
+	struct ether_hdr *hdr;
+	uint32_t attempt;
+	uint16_t rx_cnt;
+	uint16_t tx_cnt;
+	uint16_t idx;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	uint64_t start_tsc;
+	uint64_t end_tsc;
+	uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	start_tsc = rte_rdtsc();
+#endif
+
+	/* Pull one burst from the RX queue; nothing to do on an empty poll. */
+	rx_cnt = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, burst,
+				  nb_pkt_per_burst);
+	if (unlikely(rx_cnt == 0))
+		return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+	fs->rx_burst_stats.pkt_burst_spread[rx_cnt]++;
+#endif
+	fs->rx_packets += rx_cnt;
+
+	/* Rewrite each frame: destination = peer MAC, source = TX port MAC. */
+	for (idx = 0; idx < rx_cnt; idx++) {
+		hdr = rte_pktmbuf_mtod(burst[idx], struct ether_hdr *);
+		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+				&hdr->d_addr);
+		ether_addr_copy(&ports[fs->tx_port].eth_addr,
+				&hdr->s_addr);
+	}
+	tx_cnt = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, burst, rx_cnt);
+
+	/*
+	 * TX queue could not absorb the whole burst: pause briefly and
+	 * retry the remainder, up to burst_tx_retry_num times.
+	 */
+	if (unlikely(tx_cnt < rx_cnt)) {
+		for (attempt = 0; attempt < burst_tx_retry_num; attempt++) {
+			rte_delay_us(burst_tx_delay_time);
+			tx_cnt += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+					&burst[tx_cnt], rx_cnt - tx_cnt);
+			if (tx_cnt == rx_cnt)
+				break;
+		}
+	}
+
+	fs->tx_packets += tx_cnt;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+	fs->tx_burst_stats.pkt_burst_spread[tx_cnt]++;
+#endif
+	if (unlikely(tx_cnt < rx_cnt)) {
+		/* Retries exhausted: account for and free the leftovers. */
+		fs->fwd_dropped += (rx_cnt - tx_cnt);
+		while (tx_cnt < rx_cnt)
+			rte_pktmbuf_free(burst[tx_cnt++]);
+	}
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	end_tsc = rte_rdtsc();
+	core_cycles = (end_tsc - start_tsc);
+	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/* Forwarding-engine descriptor; selected in testpmd with "set fwd mac_retry". */
+struct fwd_engine mac_retry_fwd_engine = {
+	.fwd_mode_name  = "mac_retry",
+	.port_fwd_begin = NULL,
+	.port_fwd_end   = NULL,
+	.packet_fwd     = pkt_burst_mac_retry_forward,
+};
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
new file mode 100644
index 00000000..3b7fffb7
--- /dev/null
+++ b/app/test-pmd/macfwd.c
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/*
+ * Forwarding of packets in MAC mode.
+ * Change the source and the destination Ethernet addressed of packets
+ * before forwarding them.
+ */
+static void
+pkt_burst_mac_forward(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_port *txp;
+ struct rte_mbuf *mb;
+ struct ether_hdr *eth_hdr;
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+ uint16_t i;
+ uint64_t ol_flags = 0;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t start_tsc;
+ uint64_t end_tsc;
+ uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ start_tsc = rte_rdtsc();
+#endif
+
+ /*
+ * Receive a burst of packets and forward them.
+ */
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+ nb_pkt_per_burst);
+ if (unlikely(nb_rx == 0))
+ return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+ fs->rx_packets += nb_rx;
+ txp = &ports[fs->tx_port];
+ /* Request HW VLAN/QinQ tag insertion when enabled on the TX port. */
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
+ ol_flags = PKT_TX_VLAN_PKT;
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+ ol_flags |= PKT_TX_QINQ_PKT;
+ for (i = 0; i < nb_rx; i++) {
+ mb = pkts_burst[i];
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+ /* Destination = configured peer MAC, source = TX port MAC. */
+ ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+ &eth_hdr->d_addr);
+ ether_addr_copy(&ports[fs->tx_port].eth_addr,
+ &eth_hdr->s_addr);
+ mb->ol_flags = ol_flags;
+ mb->l2_len = sizeof(struct ether_hdr);
+ /* NOTE(review): l3_len assumes an IPv4 payload -- confirm. */
+ mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci = txp->tx_vlan_id;
+ mb->vlan_tci_outer = txp->tx_vlan_id_outer;
+ }
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ fs->tx_packets += nb_tx;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+ /* Free mbufs the driver did not accept and count them as dropped. */
+ if (unlikely(nb_tx < nb_rx)) {
+ fs->fwd_dropped += (nb_rx - nb_tx);
+ do {
+ rte_pktmbuf_free(pkts_burst[nb_tx]);
+ } while (++nb_tx < nb_rx);
+ }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ end_tsc = rte_rdtsc();
+ core_cycles = (end_tsc - start_tsc);
+ fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/* Descriptor for the "mac" forwarding mode; no per-port begin/end hooks. */
+struct fwd_engine mac_fwd_engine = {
+ .fwd_mode_name = "mac",
+ .port_fwd_begin = NULL,
+ .port_fwd_end = NULL,
+ .packet_fwd = pkt_burst_mac_forward,
+};
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
new file mode 100644
index 00000000..154889d1
--- /dev/null
+++ b/app/test-pmd/macswap.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Tilera Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Tilera Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/*
+ * MAC swap forwarding mode: Swap the source and the destination Ethernet
+ * addresses of packets before forwarding them.
+ */
+static void
+pkt_burst_mac_swap(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_port *txp;
+ struct rte_mbuf *mb;
+ struct ether_hdr *eth_hdr;
+ struct ether_addr addr;
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+ uint16_t i;
+ uint64_t ol_flags = 0;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t start_tsc;
+ uint64_t end_tsc;
+ uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ start_tsc = rte_rdtsc();
+#endif
+
+ /*
+ * Receive a burst of packets and forward them.
+ */
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+ nb_pkt_per_burst);
+ if (unlikely(nb_rx == 0))
+ return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+ fs->rx_packets += nb_rx;
+ txp = &ports[fs->tx_port];
+ /* Request HW VLAN/QinQ tag insertion when enabled on the TX port. */
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
+ ol_flags = PKT_TX_VLAN_PKT;
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+ ol_flags |= PKT_TX_QINQ_PKT;
+ for (i = 0; i < nb_rx; i++) {
+ mb = pkts_burst[i];
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+
+ /* Swap dest and src mac addresses. */
+ ether_addr_copy(&eth_hdr->d_addr, &addr);
+ ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
+ ether_addr_copy(&addr, &eth_hdr->s_addr);
+
+ mb->ol_flags = ol_flags;
+ mb->l2_len = sizeof(struct ether_hdr);
+ /* NOTE(review): l3_len assumes an IPv4 payload -- confirm. */
+ mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci = txp->tx_vlan_id;
+ mb->vlan_tci_outer = txp->tx_vlan_id_outer;
+ }
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ fs->tx_packets += nb_tx;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+ /* Free mbufs the driver did not accept and count them as dropped. */
+ if (unlikely(nb_tx < nb_rx)) {
+ fs->fwd_dropped += (nb_rx - nb_tx);
+ do {
+ rte_pktmbuf_free(pkts_burst[nb_tx]);
+ } while (++nb_tx < nb_rx);
+ }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ end_tsc = rte_rdtsc();
+ core_cycles = (end_tsc - start_tsc);
+ fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/* Descriptor for the "macswap" forwarding mode; no per-port begin/end hooks. */
+struct fwd_engine mac_swap_engine = {
+ .fwd_mode_name = "macswap",
+ .port_fwd_begin = NULL,
+ .port_fwd_end = NULL,
+ .packet_fwd = pkt_burst_mac_swap,
+};
diff --git a/app/test-pmd/mempool_anon.c b/app/test-pmd/mempool_anon.c
new file mode 100644
index 00000000..47304329
--- /dev/null
+++ b/app/test-pmd/mempool_anon.c
@@ -0,0 +1,201 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "mempool_osdep.h"
+#include <rte_errno.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+
+#define PAGEMAP_FNAME "/proc/self/pagemap"
+
+/*
+ * the pfn (page frame number) are bits 0-54 (see pagemap.txt in linux
+ * Documentation).
+ */
+#define PAGEMAP_PFN_BITS 54
+#define PAGEMAP_PFN_MASK RTE_LEN2MASK(PAGEMAP_PFN_BITS, phys_addr_t)
+
+
+/*
+ * Translate the virtual range [va, va + pg_num * pg_sz) into physical
+ * addresses via /proc/self/pagemap.
+ * On success pa[i] holds the physical address of page i and 0 is returned;
+ * on failure a positive errno-style code (ENOENT) is returned and the
+ * contents of pa[] are undefined.
+ */
+static int
+get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
+{
+ int32_t fd, rc;
+ uint32_t i, nb;
+ off_t ofs;
+
+ /* one 8-byte pagemap entry per page, indexed by virtual page number */
+ ofs = (uintptr_t)va / pg_sz * sizeof(*pa);
+ nb = pg_num * sizeof(*pa);
+
+ if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0)
+ return ENOENT;
+
+ if ((rc = pread(fd, pa, nb, ofs)) < 0 || (rc -= nb) != 0) {
+
+ RTE_LOG(ERR, USER1, "failed read of %u bytes from \'%s\' "
+ "at offset %zu, error code: %d\n",
+ nb, PAGEMAP_FNAME, (size_t)ofs, errno);
+ close(fd);
+ /* bail out: pa[] may hold a short or garbled read, do not
+ * PFN-mask uninitialized data. */
+ return ENOENT;
+ }
+
+ close(fd);
+
+ /* keep only the PFN bits and scale to a byte address */
+ for (i = 0; i != pg_num; i++)
+ pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;
+
+ return 0;
+}
+
+/*
+ * Create a mempool over anonymous, mlock'ed mmap memory instead of EAL
+ * hugepage memory. Physical addresses of the pages are resolved with
+ * get_phys_map(); unused tail pages are unmapped before the mempool is
+ * built with rte_mempool_xmem_create().
+ * Returns the new mempool, or NULL with rte_errno set on failure.
+ */
+struct rte_mempool *
+mempool_anon_create(const char *name, unsigned elt_num, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags)
+{
+ struct rte_mempool *mp;
+ phys_addr_t *pa;
+ char *va, *uv;
+ uint32_t n, pg_num, pg_shift, pg_sz, total_size;
+ size_t sz;
+ ssize_t usz;
+ int32_t rc;
+
+ /* default error code until a later stage succeeds */
+ rc = ENOMEM;
+ mp = NULL;
+
+ pg_sz = getpagesize();
+ if (rte_is_power_of_2(pg_sz) == 0) {
+ rte_errno = EINVAL;
+ return mp;
+ }
+
+ pg_shift = rte_bsf32(pg_sz);
+
+ total_size = rte_mempool_calc_obj_size(elt_size, flags, NULL);
+
+ /* calc max memory size and max number of pages needed. */
+ sz = rte_mempool_xmem_size(elt_num, total_size, pg_shift);
+ pg_num = sz >> pg_shift;
+
+ /* get chunk of virtually contiguous memory. MAP_LOCKED pins the
+ * pages so their physical addresses stay valid. */
+ if ((va = mmap(NULL, sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS | MAP_LOCKED,
+ -1, 0)) == MAP_FAILED) {
+ RTE_LOG(ERR, USER1, "%s(%s) failed mmap of %zu bytes, "
+ "error code: %d\n",
+ __func__, name, sz, errno);
+ rte_errno = rc;
+ return mp;
+ }
+
+ /* extract physical mappings of the allocated memory. */
+ if ((pa = calloc(pg_num, sizeof (*pa))) != NULL &&
+ (rc = get_phys_map(va, pa, pg_num, pg_sz)) == 0) {
+
+ /*
+ * Check that allocated size is big enough to hold elt_num
+ * objects and calculate how many bytes are actually required.
+ */
+
+ if ((usz = rte_mempool_xmem_usage(va, elt_num, total_size, pa,
+ pg_num, pg_shift)) < 0) {
+
+ /* negative usage: only -usz objects fit the region */
+ n = -usz;
+ rc = ENOENT;
+ RTE_LOG(ERR, USER1, "%s(%s) only %u objects from %u "
+ "requested can be created over "
+ "mmaped region %p of %zu bytes\n",
+ __func__, name, n, elt_num, va, sz);
+ } else {
+
+ /* unmap unused pages if any */
+ if ((size_t)usz < sz) {
+
+ uv = va + usz;
+ usz = sz - usz;
+
+ RTE_LOG(INFO, USER1,
+ "%s(%s): unmap unused %zu of %zu "
+ "mmaped bytes @%p\n",
+ __func__, name, (size_t)usz, sz, uv);
+ munmap(uv, usz);
+ sz -= usz;
+ pg_num = sz >> pg_shift;
+ }
+
+ if ((mp = rte_mempool_xmem_create(name, elt_num,
+ elt_size, cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags, va, pa, pg_num,
+ pg_shift)) != NULL)
+
+ RTE_VERIFY(elt_num == mp->size);
+ }
+ }
+
+ /* on any failure release the mapping and report the stage's code */
+ if (mp == NULL) {
+ munmap(va, sz);
+ rte_errno = rc;
+ }
+
+ free(pa);
+ return mp;
+}
+
+#else /* RTE_EXEC_ENV_LINUXAPP */
+
+
+/*
+ * Non-Linux build: anonymous-mmap mempools rely on /proc/self/pagemap,
+ * so the feature is unsupported; always fail with ENOTSUP.
+ */
+struct rte_mempool *
+mempool_anon_create(__rte_unused const char *name,
+ __rte_unused unsigned elt_num, __rte_unused unsigned elt_size,
+ __rte_unused unsigned cache_size,
+ __rte_unused unsigned private_data_size,
+ __rte_unused rte_mempool_ctor_t *mp_init,
+ __rte_unused void *mp_init_arg,
+ __rte_unused rte_mempool_obj_ctor_t *obj_init,
+ __rte_unused void *obj_init_arg,
+ __rte_unused int socket_id, __rte_unused unsigned flags)
+{
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+#endif /* RTE_EXEC_ENV_LINUXAPP */
diff --git a/app/test-pmd/mempool_osdep.h b/app/test-pmd/mempool_osdep.h
new file mode 100644
index 00000000..6b8df68a
--- /dev/null
+++ b/app/test-pmd/mempool_osdep.h
@@ -0,0 +1,54 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MEMPOOL_OSDEP_H_
+#define _MEMPOOL_OSDEP_H_
+
+#include <rte_mempool.h>
+
+/**
+ * @file
+ * mempool OS specific header.
+ */
+
+/*
+ * Create mempool over objects from mmap(..., MAP_ANONYMOUS, ...).
+ */
+struct rte_mempool *
+mempool_anon_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags);
+
+#endif /* _MEMPOOL_OSDEP_H_ */
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
new file mode 100644
index 00000000..55572ebe
--- /dev/null
+++ b/app/test-pmd/parameters.c
@@ -0,0 +1,986 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#ifdef RTE_LIBRTE_CMDLINE
+#include <cmdline_parse.h>
+#include <cmdline_parse_etheraddr.h>
+#endif
+#ifdef RTE_LIBRTE_PMD_BOND
+#include <rte_eth_bond.h>
+#endif
+
+#include "testpmd.h"
+
+/*
+ * Print the command-line synopsis and one help line per testpmd option.
+ * @param progname  argv[0], shown in the usage banner.
+ */
+static void
+usage(char* progname)
+{
+ printf("usage: %s "
+#ifdef RTE_LIBRTE_CMDLINE
+ "[--interactive|-i] "
+#endif
+ "[--help|-h] | [--auto-start|-a] | ["
+ "--coremask=COREMASK --portmask=PORTMASK --numa "
+ "--mbuf-size= | --total-num-mbufs= | "
+ "--nb-cores= | --nb-ports= | "
+#ifdef RTE_LIBRTE_CMDLINE
+ "--eth-peers-configfile= | "
+ "--eth-peer=X,M:M:M:M:M:M | "
+#endif
+ "--pkt-filter-mode= | "
+ "--rss-ip | --rss-udp | "
+ "--rxpt= | --rxht= | --rxwt= | --rxfreet= | "
+ "--txpt= | --txht= | --txwt= | --txfreet= | "
+ "--txrst= | --txqflags= ]\n",
+ progname);
+#ifdef RTE_LIBRTE_CMDLINE
+ printf(" --interactive: run in interactive mode.\n");
+#endif
+ printf(" --auto-start: start forwarding on init "
+ "[always when non-interactive].\n");
+ printf(" --help: display this message and quit.\n");
+ printf(" --nb-cores=N: set the number of forwarding cores "
+ "(1 <= N <= %d).\n", nb_lcores);
+ printf(" --nb-ports=N: set the number of forwarding ports "
+ "(1 <= N <= %d).\n", nb_ports);
+ printf(" --coremask=COREMASK: hexadecimal bitmask of cores running "
+ "the packet forwarding test. The master lcore is reserved for "
+ "command line parsing only, and cannot be masked on for "
+ "packet forwarding.\n");
+ printf(" --portmask=PORTMASK: hexadecimal bitmask of ports used "
+ "by the packet forwarding test.\n");
+ printf(" --numa: enable NUMA-aware allocation of RX/TX rings and of "
+ "RX memory buffers (mbufs).\n");
+ printf(" --port-numa-config=(port,socket)[,(port,socket)]: "
+ "specify the socket on which the memory pool "
+ "used by the port will be allocated.\n");
+ printf(" --ring-numa-config=(port,flag,socket)[,(port,flag,socket)]: "
+ "specify the socket on which the TX/RX rings for "
+ "the port will be allocated "
+ "(flag: 1 for RX; 2 for TX; 3 for RX and TX).\n");
+ printf(" --socket-num=N: set socket from which all memory is allocated "
+ "in NUMA mode.\n");
+ printf(" --mbuf-size=N: set the data size of mbuf to N bytes.\n");
+ printf(" --total-num-mbufs=N: set the number of mbufs to be allocated "
+ "in mbuf pools.\n");
+ printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
+#ifdef RTE_LIBRTE_CMDLINE
+ printf(" --eth-peers-configfile=name: config file with ethernet addresses "
+ "of peer ports.\n");
+ printf(" --eth-peer=X,M:M:M:M:M:M: set the MAC address of the X peer "
+ "port (0 <= X < %d).\n", RTE_MAX_ETHPORTS);
+#endif
+ printf(" --pkt-filter-mode=N: set Flow Director mode "
+ "(N: none (default mode) or signature or perfect).\n");
+ printf(" --pkt-filter-report-hash=N: set Flow Director report mode "
+ "(N: none or match (default) or always).\n");
+ printf(" --pkt-filter-size=N: set Flow Director mode "
+ "(N: 64K (default mode) or 128K or 256K).\n");
+ printf(" --pkt-filter-drop-queue=N: set drop-queue. "
+ "In perfect mode, when you add a rule with queue = -1 "
+ "the packet will be enqueued into the rx drop-queue. "
+ "If the drop-queue doesn't exist, the packet is dropped. "
+ "By default drop-queue=127.\n");
+ printf(" --crc-strip: enable CRC stripping by hardware.\n");
+ printf(" --enable-rx-cksum: enable rx hardware checksum offload.\n");
+ printf(" --disable-hw-vlan: disable hardware vlan.\n");
+ printf(" --disable-hw-vlan-filter: disable hardware vlan filter.\n");
+ printf(" --disable-hw-vlan-strip: disable hardware vlan strip.\n");
+ printf(" --disable-hw-vlan-extend: disable hardware vlan extend.\n");
+ printf(" --enable-drop-en: enable per queue packet drop.\n");
+ printf(" --disable-rss: disable rss.\n");
+ printf(" --port-topology=N: set port topology (N: paired (default) or "
+ "chained).\n");
+ printf(" --forward-mode=N: set forwarding mode (N: %s).\n",
+ list_pkt_forwarding_modes());
+ printf(" --rss-ip: set RSS functions to IPv4/IPv6 only.\n");
+ printf(" --rss-udp: set RSS functions to IPv4/IPv6 + UDP.\n");
+ printf(" --rxq=N: set the number of RX queues per port to N.\n");
+ printf(" --rxd=N: set the number of descriptors in RX rings to N.\n");
+ printf(" --txq=N: set the number of TX queues per port to N.\n");
+ printf(" --txd=N: set the number of descriptors in TX rings to N.\n");
+ printf(" --burst=N: set the number of packets per burst to N.\n");
+ printf(" --mbcache=N: set the cache of mbuf memory pool to N.\n");
+ printf(" --rxpt=N: set prefetch threshold register of RX rings to N.\n");
+ printf(" --rxht=N: set the host threshold register of RX rings to N.\n");
+ printf(" --rxfreet=N: set the free threshold of RX descriptors to N "
+ "(0 <= N < value of rxd).\n");
+ printf(" --rxwt=N: set the write-back threshold register of RX rings to N.\n");
+ printf(" --txpt=N: set the prefetch threshold register of TX rings to N.\n");
+ printf(" --txht=N: set the host threshold register of TX rings to N.\n");
+ printf(" --txwt=N: set the write-back threshold register of TX rings to N.\n");
+ printf(" --txfreet=N: set the transmit free threshold of TX rings to N "
+ "(0 <= N <= value of txd).\n");
+ printf(" --txrst=N: set the transmit RS bit threshold of TX rings to N "
+ "(0 <= N <= value of txd).\n");
+ printf(" --txqflags=0xXXXXXXXX: hexadecimal bitmask of TX queue flags "
+ "(0 <= N <= 0x7FFFFFFF).\n");
+ printf(" --tx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping)]: "
+ "tx queues statistics counters mapping "
+ "(0 <= mapping <= %d).\n", RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ printf(" --rx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping)]: "
+ "rx queues statistics counters mapping "
+ "(0 <= mapping <= %d).\n", RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ printf(" --no-flush-rx: Don't flush RX streams before forwarding."
+ " Used mainly with PCAP drivers.\n");
+ printf(" --txpkts=X[,Y]*: set TX segment sizes.\n");
+ printf(" --disable-link-check: disable check on link status when "
+ "starting/stopping ports.\n");
+}
+
+#ifdef RTE_LIBRTE_CMDLINE
+/*
+ * Load peer Ethernet addresses from a text file, one MAC address per line,
+ * filling peer_eth_addrs[] in port order (at most RTE_MAX_ETHPORTS lines).
+ * Returns 0 on success, -1 if the file cannot be opened or a line does not
+ * parse as a MAC address.
+ */
+static int
+init_peer_eth_addrs(char *config_filename)
+{
+ FILE *fp;
+ portid_t port;
+ char line[50];
+
+ fp = fopen(config_filename, "r");
+ if (fp == NULL) {
+ perror("Failed to open eth config file\n");
+ return -1;
+ }
+
+ for (port = 0; port < RTE_MAX_ETHPORTS; port++) {
+ /* stop at end of file; remaining ports keep their defaults */
+ if (fgets(line, sizeof(line), fp) == NULL)
+ break;
+ if (cmdline_parse_etheraddr(NULL, line, &peer_eth_addrs[port],
+ sizeof(peer_eth_addrs[port])) < 0) {
+ printf("Bad MAC address format on line %d\n", port + 1);
+ fclose(fp);
+ return -1;
+ }
+ }
+ fclose(fp);
+ nb_peer_eth_addrs = (portid_t) port;
+ return 0;
+}
+#endif
+
+/*
+ * Parse the forwarding core mask (hexadecimal string) and apply it to the
+ * global forwarding-lcore configuration. Exits the program on any error.
+ */
+static void
+parse_fwd_coremask(const char *coremask)
+{
+ unsigned long long int mask;
+ char *end = NULL;
+
+ /* parse hexadecimal string; *end must land on the terminator */
+ mask = strtoull(coremask, &end, 16);
+ if (coremask[0] == '\0' || end == NULL || *end != '\0')
+ rte_exit(EXIT_FAILURE, "Invalid fwd core mask\n");
+ /* rte_exit() above does not return, so no else is needed */
+ if (set_fwd_lcores_mask((uint64_t) mask) < 0)
+ rte_exit(EXIT_FAILURE, "coremask is not valid\n");
+}
+
+/*
+ * Parse the forwarding port mask (hexadecimal string) and apply it to the
+ * global forwarding-port configuration. Exits the program on a bad mask.
+ */
+static void
+parse_fwd_portmask(const char *portmask)
+{
+ unsigned long long int mask;
+ char *end = NULL;
+
+ /* parse hexadecimal string; *end must land on the terminator */
+ mask = strtoull(portmask, &end, 16);
+ if (portmask[0] == '\0' || end == NULL || *end != '\0')
+ rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n");
+ else
+ set_fwd_ports_mask((uint64_t) mask);
+}
+
+
+/*
+ * Parse a "(port,queue,counter)[,(port,queue,counter)]" list and fill the
+ * RX or TX queue-stats mapping array (selected by is_rx), then point the
+ * corresponding global mapping pointer at that array.
+ * Returns 0 on success, -1 on any syntax/range error or table overflow.
+ */
+static int
+parse_queue_stats_mapping_config(const char *q_arg, int is_rx)
+{
+ char s[256];
+ const char *p, *p0 = q_arg;
+ char *end;
+ enum fieldnames {
+ FLD_PORT = 0,
+ FLD_QUEUE,
+ FLD_STATS_COUNTER,
+ _NUM_FLD
+ };
+ unsigned long int_fld[_NUM_FLD];
+ char *str_fld[_NUM_FLD];
+ int i;
+ unsigned size;
+
+ /* reset from value set at definition */
+ is_rx ? (nb_rx_queue_stats_mappings = 0) : (nb_tx_queue_stats_mappings = 0);
+
+ /* iterate over each parenthesized "(...)" group in the argument */
+ while ((p = strchr(p0,'(')) != NULL) {
+ ++p;
+ if((p0 = strchr(p,')')) == NULL)
+ return -1;
+
+ size = p0 - p;
+ if(size >= sizeof(s))
+ return -1;
+
+ /* copy the group body and split it on commas into 3 fields */
+ snprintf(s, sizeof(s), "%.*s", size, p);
+ if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
+ return -1;
+ for (i = 0; i < _NUM_FLD; i++){
+ errno = 0;
+ int_fld[i] = strtoul(str_fld[i], &end, 0);
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ return -1;
+ }
+ /* Check mapping field is in correct range (0..RTE_ETHDEV_QUEUE_STAT_CNTRS-1) */
+ if (int_fld[FLD_STATS_COUNTER] >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ printf("Stats counter not in the correct range 0..%d\n",
+ RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ return -1;
+ }
+
+ if (!is_rx) {
+ if ((nb_tx_queue_stats_mappings >=
+ MAX_TX_QUEUE_STATS_MAPPINGS)) {
+ printf("exceeded max number of TX queue "
+ "statistics mappings: %hu\n",
+ nb_tx_queue_stats_mappings);
+ return -1;
+ }
+ tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].port_id =
+ (uint8_t)int_fld[FLD_PORT];
+ tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].queue_id =
+ (uint8_t)int_fld[FLD_QUEUE];
+ tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].stats_counter_id =
+ (uint8_t)int_fld[FLD_STATS_COUNTER];
+ ++nb_tx_queue_stats_mappings;
+ }
+ else {
+ if ((nb_rx_queue_stats_mappings >=
+ MAX_RX_QUEUE_STATS_MAPPINGS)) {
+ printf("exceeded max number of RX queue "
+ "statistics mappings: %hu\n",
+ nb_rx_queue_stats_mappings);
+ return -1;
+ }
+ rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].port_id =
+ (uint8_t)int_fld[FLD_PORT];
+ rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].queue_id =
+ (uint8_t)int_fld[FLD_QUEUE];
+ rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].stats_counter_id =
+ (uint8_t)int_fld[FLD_STATS_COUNTER];
+ ++nb_rx_queue_stats_mappings;
+ }
+
+ }
+/* Reassign the rx/tx_queue_stats_mappings pointer to point to this newly populated array rather */
+/* than to the default array (that was set at its definition) */
+ is_rx ? (rx_queue_stats_mappings = rx_queue_stats_mappings_array) :
+ (tx_queue_stats_mappings = tx_queue_stats_mappings_array);
+ return 0;
+}
+
+/*
+ * Parse a "(port,socket)[,(port,socket)]" list and record, per port, the
+ * NUMA socket from which its mbuf pool memory should be allocated
+ * (stored in port_numa[]). Returns 0 on success, -1 on any error.
+ */
+static int
+parse_portnuma_config(const char *q_arg)
+{
+ char s[256];
+ const char *p, *p0 = q_arg;
+ char *end;
+ uint8_t i,port_id,socket_id;
+ unsigned size;
+ enum fieldnames {
+ FLD_PORT = 0,
+ FLD_SOCKET,
+ _NUM_FLD
+ };
+ unsigned long int_fld[_NUM_FLD];
+ char *str_fld[_NUM_FLD];
+ portid_t pid;
+
+ /* reset from value set at definition */
+ while ((p = strchr(p0,'(')) != NULL) {
+ ++p;
+ if((p0 = strchr(p,')')) == NULL)
+ return -1;
+
+ size = p0 - p;
+ if(size >= sizeof(s))
+ return -1;
+
+ /* copy the group body and split it on commas into 2 fields */
+ snprintf(s, sizeof(s), "%.*s", size, p);
+ if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
+ return -1;
+ for (i = 0; i < _NUM_FLD; i++) {
+ errno = 0;
+ int_fld[i] = strtoul(str_fld[i], &end, 0);
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ return -1;
+ }
+ port_id = (uint8_t)int_fld[FLD_PORT];
+ if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ printf("Valid port range is [0");
+ FOREACH_PORT(pid, ports)
+ printf(", %d", pid);
+ printf("]\n");
+ return -1;
+ }
+ socket_id = (uint8_t)int_fld[FLD_SOCKET];
+ if(socket_id >= max_socket) {
+ printf("Invalid socket id, range is [0, %d]\n",
+ max_socket - 1);
+ return -1;
+ }
+ port_numa[port_id] = socket_id;
+ }
+
+ return 0;
+}
+
+/*
+ * Parse the "--ring-numa-config" argument: a list of
+ * "(port,flag,socket)" triples, e.g. "(0,1,0),(1,3,1)", where flag
+ * selects which ring(s) the socket binding applies to:
+ *   1 = RX ring only, 2 = TX ring only, 3 = both RX and TX rings.
+ * Each valid triple updates rxring_numa[] and/or txring_numa[].
+ * Returns 0 on success, -1 on any syntax or range error.
+ */
+static int
+parse_ringnuma_config(const char *q_arg)
+{
+	char s[256];
+	const char *p, *p0 = q_arg;
+	char *end;
+	uint8_t i,port_id,ring_flag,socket_id;
+	unsigned size;
+	enum fieldnames {
+		FLD_PORT = 0,
+		FLD_FLAG,
+		FLD_SOCKET,
+		_NUM_FLD
+	};
+	unsigned long int_fld[_NUM_FLD];
+	char *str_fld[_NUM_FLD];
+	portid_t pid;
+	#define RX_RING_ONLY 0x1
+	#define TX_RING_ONLY 0x2
+	#define RXTX_RING 0x3
+
+	/* reset from value set at definition */
+	while ((p = strchr(p0,'(')) != NULL) {
+		++p;
+		/* a '(' without a matching ')' is a malformed entry */
+		if((p0 = strchr(p,')')) == NULL)
+			return -1;
+
+		size = p0 - p;
+		if(size >= sizeof(s))
+			return -1;
+
+		/* copy the parenthesized payload and split it on ','
+		 * into exactly _NUM_FLD fields */
+		snprintf(s, sizeof(s), "%.*s", size, p);
+		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
+			return -1;
+		for (i = 0; i < _NUM_FLD; i++) {
+			errno = 0;
+			int_fld[i] = strtoul(str_fld[i], &end, 0);
+			/* each field must parse fully and fit in a uint8_t */
+			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+				return -1;
+		}
+		port_id = (uint8_t)int_fld[FLD_PORT];
+		if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+			printf("Valid port range is [0");
+			FOREACH_PORT(pid, ports)
+				printf(", %d", pid);
+			printf("]\n");
+			return -1;
+		}
+		socket_id = (uint8_t)int_fld[FLD_SOCKET];
+		if (socket_id >= max_socket) {
+			printf("Invalid socket id, range is [0, %d]\n",
+				max_socket - 1);
+			return -1;
+		}
+		ring_flag = (uint8_t)int_fld[FLD_FLAG];
+		if ((ring_flag < RX_RING_ONLY) || (ring_flag > RXTX_RING)) {
+			printf("Invalid ring-flag=%d config for port =%d\n",
+				ring_flag,port_id);
+			return -1;
+		}
+
+		/* NOTE: the default case is unreachable here because
+		 * ring_flag was already range-checked to [1,3] above. */
+		switch (ring_flag & RXTX_RING) {
+		case RX_RING_ONLY:
+			rxring_numa[port_id] = socket_id;
+			break;
+		case TX_RING_ONLY:
+			txring_numa[port_id] = socket_id;
+			break;
+		case RXTX_RING:
+			rxring_numa[port_id] = socket_id;
+			txring_numa[port_id] = socket_id;
+			break;
+		default:
+			printf("Invalid ring-flag=%d config for port=%d\n",
+				ring_flag,port_id);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Parse every testpmd application option (the argv portion after the
+ * EAL "--" separator). Recognised options set the corresponding testpmd
+ * globals (rx_mode, fdir_conf, nb_rxq/nb_txq, descriptor/threshold
+ * values, NUMA tables, stats mappings, ...). Any invalid value
+ * terminates the process via rte_exit(); there is no error return.
+ */
+void
+launch_args_parse(int argc, char** argv)
+{
+	int n, opt;
+	char **argvopt;
+	int opt_idx;
+	/* local tags passed to parse_queue_stats_mapping_config() to
+	 * select which (tx/rx) mapping array is populated */
+	enum { TX, RX };
+
+	static struct option lgopts[] = {
+		{ "help", 0, 0, 0 },
+#ifdef RTE_LIBRTE_CMDLINE
+		{ "interactive", 0, 0, 0 },
+		{ "auto-start", 0, 0, 0 },
+		{ "eth-peers-configfile", 1, 0, 0 },
+		{ "eth-peer", 1, 0, 0 },
+#endif
+		{ "ports", 1, 0, 0 },
+		{ "nb-cores", 1, 0, 0 },
+		{ "nb-ports", 1, 0, 0 },
+		{ "coremask", 1, 0, 0 },
+		{ "portmask", 1, 0, 0 },
+		{ "numa", 0, 0, 0 },
+		{ "mp-anon", 0, 0, 0 },
+		{ "port-numa-config", 1, 0, 0 },
+		{ "ring-numa-config", 1, 0, 0 },
+		{ "socket-num", 1, 0, 0 },
+		{ "mbuf-size", 1, 0, 0 },
+		{ "total-num-mbufs", 1, 0, 0 },
+		{ "max-pkt-len", 1, 0, 0 },
+		{ "pkt-filter-mode", 1, 0, 0 },
+		{ "pkt-filter-report-hash", 1, 0, 0 },
+		{ "pkt-filter-size", 1, 0, 0 },
+		{ "pkt-filter-drop-queue", 1, 0, 0 },
+		{ "crc-strip", 0, 0, 0 },
+		{ "enable-rx-cksum", 0, 0, 0 },
+		{ "disable-hw-vlan", 0, 0, 0 },
+		{ "disable-hw-vlan-filter", 0, 0, 0 },
+		{ "disable-hw-vlan-strip", 0, 0, 0 },
+		{ "disable-hw-vlan-extend", 0, 0, 0 },
+		{ "enable-drop-en", 0, 0, 0 },
+		{ "disable-rss", 0, 0, 0 },
+		{ "port-topology", 1, 0, 0 },
+		{ "forward-mode", 1, 0, 0 },
+		{ "rss-ip", 0, 0, 0 },
+		{ "rss-udp", 0, 0, 0 },
+		{ "rxq", 1, 0, 0 },
+		{ "txq", 1, 0, 0 },
+		{ "rxd", 1, 0, 0 },
+		{ "txd", 1, 0, 0 },
+		{ "burst", 1, 0, 0 },
+		{ "mbcache", 1, 0, 0 },
+		{ "txpt", 1, 0, 0 },
+		{ "txht", 1, 0, 0 },
+		{ "txwt", 1, 0, 0 },
+		{ "txfreet", 1, 0, 0 },
+		{ "txrst", 1, 0, 0 },
+		{ "txqflags", 1, 0, 0 },
+		{ "rxpt", 1, 0, 0 },
+		{ "rxht", 1, 0, 0 },
+		{ "rxwt", 1, 0, 0 },
+		{ "rxfreet", 1, 0, 0 },
+		{ "tx-queue-stats-mapping", 1, 0, 0 },
+		{ "rx-queue-stats-mapping", 1, 0, 0 },
+		{ "no-flush-rx", 0, 0, 0 },
+		{ "txpkts", 1, 0, 0 },
+		{ "disable-link-check", 0, 0, 0 },
+		{ 0, 0, 0, 0 },
+	};
+
+	argvopt = argv;
+
+	/* "-i" (interactive) is only meaningful when the cmdline
+	 * library is compiled in */
+#ifdef RTE_LIBRTE_CMDLINE
+#define SHORTOPTS "i"
+#else
+#define SHORTOPTS ""
+#endif
+	while ((opt = getopt_long(argc, argvopt, SHORTOPTS "ah",
+				 lgopts, &opt_idx)) != EOF) {
+		switch (opt) {
+#ifdef RTE_LIBRTE_CMDLINE
+		case 'i':
+			printf("Interactive-mode selected\n");
+			interactive = 1;
+			break;
+#endif
+		case 'a':
+			printf("Auto-start selected\n");
+			auto_start = 1;
+			break;
+
+		case 0: /*long options */
+			/* every long option is dispatched by comparing
+			 * its name; each block below handles one option */
+			if (!strcmp(lgopts[opt_idx].name, "help")) {
+				usage(argv[0]);
+				rte_exit(EXIT_SUCCESS, "Displayed help\n");
+			}
+#ifdef RTE_LIBRTE_CMDLINE
+			if (!strcmp(lgopts[opt_idx].name, "interactive")) {
+				printf("Interactive-mode selected\n");
+				interactive = 1;
+			}
+			if (!strcmp(lgopts[opt_idx].name, "auto-start")) {
+				printf("Auto-start selected\n");
+				auto_start = 1;
+			}
+			if (!strcmp(lgopts[opt_idx].name,
+				    "eth-peers-configfile")) {
+				/* NOTE(review): the error message says
+				 * "logfile" but the file being opened is the
+				 * eth-peers config file - message is wrong
+				 * but left untouched (string is behavior). */
+				if (init_peer_eth_addrs(optarg) != 0)
+					rte_exit(EXIT_FAILURE,
+						 "Cannot open logfile\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "eth-peer")) {
+				char *port_end;
+				uint8_t c, peer_addr[6];
+
+				/* format is "<port>,<MAC>": parse the port
+				 * number, then the MAC after the comma */
+				errno = 0;
+				n = strtoul(optarg, &port_end, 10);
+				if (errno != 0 || port_end == optarg || *port_end++ != ',')
+					rte_exit(EXIT_FAILURE,
+						 "Invalid eth-peer: %s", optarg);
+				if (n >= RTE_MAX_ETHPORTS)
+					rte_exit(EXIT_FAILURE,
+						 "eth-peer: port %d >= RTE_MAX_ETHPORTS(%d)\n",
+						 n, RTE_MAX_ETHPORTS);
+
+				if (cmdline_parse_etheraddr(NULL, port_end,
+						&peer_addr, sizeof(peer_addr)) < 0)
+					rte_exit(EXIT_FAILURE,
+						 "Invalid ethernet address: %s\n",
+						 port_end);
+				for (c = 0; c < 6; c++)
+					peer_eth_addrs[n].addr_bytes[c] =
+						peer_addr[c];
+				nb_peer_eth_addrs++;
+			}
+#endif
+			if (!strcmp(lgopts[opt_idx].name, "nb-ports")) {
+				n = atoi(optarg);
+				if (n > 0 && n <= nb_ports)
+					nb_fwd_ports = (uint8_t) n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "Invalid port %d\n", n);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "nb-cores")) {
+				n = atoi(optarg);
+				if (n > 0 && n <= nb_lcores)
+					nb_fwd_lcores = (uint8_t) n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "nb-cores should be > 0 and <= %d\n",
+						 nb_lcores);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "coremask"))
+				parse_fwd_coremask(optarg);
+			if (!strcmp(lgopts[opt_idx].name, "portmask"))
+				parse_fwd_portmask(optarg);
+			if (!strcmp(lgopts[opt_idx].name, "numa")) {
+				numa_support = 1;
+				/* clear any per-port/ring socket bindings so
+				 * --port-numa-config / --ring-numa-config can
+				 * repopulate them */
+				memset(port_numa,NUMA_NO_CONFIG,RTE_MAX_ETHPORTS);
+				memset(rxring_numa,NUMA_NO_CONFIG,RTE_MAX_ETHPORTS);
+				memset(txring_numa,NUMA_NO_CONFIG,RTE_MAX_ETHPORTS);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "mp-anon")) {
+				mp_anon = 1;
+			}
+			if (!strcmp(lgopts[opt_idx].name, "port-numa-config")) {
+				if (parse_portnuma_config(optarg))
+					rte_exit(EXIT_FAILURE,
+					   "invalid port-numa configuration\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "ring-numa-config"))
+				if (parse_ringnuma_config(optarg))
+					rte_exit(EXIT_FAILURE,
+					   "invalid ring-numa configuration\n");
+			if (!strcmp(lgopts[opt_idx].name, "socket-num")) {
+				n = atoi(optarg);
+				if((uint8_t)n < max_socket)
+					socket_num = (uint8_t)n;
+				else
+					rte_exit(EXIT_FAILURE,
+						"The socket number should be < %d\n",
+						max_socket);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "mbuf-size")) {
+				n = atoi(optarg);
+				if (n > 0 && n <= 0xFFFF)
+					mbuf_data_size = (uint16_t) n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "mbuf-size should be > 0 and < 65536\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "total-num-mbufs")) {
+				n = atoi(optarg);
+				if (n > 1024)
+					param_total_num_mbufs = (unsigned)n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "total-num-mbufs should be > 1024\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
+				n = atoi(optarg);
+				if (n >= ETHER_MIN_LEN) {
+					rx_mode.max_rx_pkt_len = (uint32_t) n;
+					/* lengths beyond the standard frame
+					 * size implicitly enable jumbo frames */
+					if (n > ETHER_MAX_LEN)
+						rx_mode.jumbo_frame = 1;
+				} else
+					rte_exit(EXIT_FAILURE,
+						 "Invalid max-pkt-len=%d - should be > %d\n",
+						 n, ETHER_MIN_LEN);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-mode")) {
+				if (!strcmp(optarg, "signature"))
+					fdir_conf.mode =
+						RTE_FDIR_MODE_SIGNATURE;
+				else if (!strcmp(optarg, "perfect"))
+					fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
+				else if (!strcmp(optarg, "perfect-mac-vlan"))
+					fdir_conf.mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+				else if (!strcmp(optarg, "perfect-tunnel"))
+					fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+				else if (!strcmp(optarg, "none"))
+					fdir_conf.mode = RTE_FDIR_MODE_NONE;
+				else
+					/* NOTE(review): "pkt-mode-invalid %s
+					 * invalid" is a garbled message; left
+					 * as-is (string is behavior). */
+					rte_exit(EXIT_FAILURE,
+						 "pkt-mode-invalid %s invalid - must be: "
+						 "none, signature, perfect, perfect-mac-vlan"
+						 " or perfect-tunnel\n",
+						 optarg);
+			}
+			if (!strcmp(lgopts[opt_idx].name,
+				    "pkt-filter-report-hash")) {
+				if (!strcmp(optarg, "none"))
+					fdir_conf.status =
+						RTE_FDIR_NO_REPORT_STATUS;
+				else if (!strcmp(optarg, "match"))
+					fdir_conf.status =
+						RTE_FDIR_REPORT_STATUS;
+				else if (!strcmp(optarg, "always"))
+					fdir_conf.status =
+						RTE_FDIR_REPORT_STATUS_ALWAYS;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "pkt-filter-report-hash %s invalid "
+						 "- must be: none or match or always\n",
+						 optarg);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
+				if (!strcmp(optarg, "64K"))
+					fdir_conf.pballoc =
+						RTE_FDIR_PBALLOC_64K;
+				else if (!strcmp(optarg, "128K"))
+					fdir_conf.pballoc =
+						RTE_FDIR_PBALLOC_128K;
+				else if (!strcmp(optarg, "256K"))
+					fdir_conf.pballoc =
+						RTE_FDIR_PBALLOC_256K;
+				else
+					rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
+						 " must be: 64K or 128K or 256K\n",
+						 optarg);
+			}
+			if (!strcmp(lgopts[opt_idx].name,
+				    "pkt-filter-drop-queue")) {
+				/* NOTE(review): n > 255 silently truncates in
+				 * the (uint8_t) cast; only negatives are
+				 * rejected here. */
+				n = atoi(optarg);
+				if (n >= 0)
+					fdir_conf.drop_queue = (uint8_t) n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "drop queue %d invalid - must"
+						 "be >= 0 \n", n);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "crc-strip"))
+				rx_mode.hw_strip_crc = 1;
+			if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
+				rx_mode.hw_ip_checksum = 1;
+
+			/* --disable-hw-vlan turns off all three VLAN offloads
+			 * at once; the next three options disable them
+			 * individually */
+			if (!strcmp(lgopts[opt_idx].name, "disable-hw-vlan")) {
+				rx_mode.hw_vlan_filter = 0;
+				rx_mode.hw_vlan_strip  = 0;
+				rx_mode.hw_vlan_extend = 0;
+			}
+
+			if (!strcmp(lgopts[opt_idx].name,
+					"disable-hw-vlan-filter"))
+				rx_mode.hw_vlan_filter = 0;
+
+			if (!strcmp(lgopts[opt_idx].name,
+					"disable-hw-vlan-strip"))
+				rx_mode.hw_vlan_strip  = 0;
+
+			if (!strcmp(lgopts[opt_idx].name,
+					"disable-hw-vlan-extend"))
+				rx_mode.hw_vlan_extend = 0;
+
+			if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
+				rx_drop_en = 1;
+
+			if (!strcmp(lgopts[opt_idx].name, "disable-rss"))
+				rss_hf = 0;
+			if (!strcmp(lgopts[opt_idx].name, "port-topology")) {
+				if (!strcmp(optarg, "paired"))
+					port_topology = PORT_TOPOLOGY_PAIRED;
+				else if (!strcmp(optarg, "chained"))
+					port_topology = PORT_TOPOLOGY_CHAINED;
+				else if (!strcmp(optarg, "loop"))
+					port_topology = PORT_TOPOLOGY_LOOP;
+				else
+					rte_exit(EXIT_FAILURE, "port-topology %s invalid -"
+						 " must be: paired or chained \n",
+						 optarg);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
+				set_pkt_forwarding_mode(optarg);
+			if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
+				rss_hf = ETH_RSS_IP;
+			if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
+				rss_hf = ETH_RSS_UDP;
+			if (!strcmp(lgopts[opt_idx].name, "rxq")) {
+				n = atoi(optarg);
+				if (n >= 0 && n <= (int) MAX_QUEUE_ID)
+					nb_rxq = (queueid_t) n;
+				else
+					rte_exit(EXIT_FAILURE, "rxq %d invalid - must be"
+						  " >= 0 && <= %d\n", n,
+						  (int) MAX_QUEUE_ID);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txq")) {
+				n = atoi(optarg);
+				if (n >= 0 && n <= (int) MAX_QUEUE_ID)
+					nb_txq = (queueid_t) n;
+				else
+					rte_exit(EXIT_FAILURE, "txq %d invalid - must be"
+						  " >= 0 && <= %d\n", n,
+						  (int) MAX_QUEUE_ID);
+			}
+			/* NOTE(review): this check runs on every long option,
+			 * not once after parsing, so it fires as soon as both
+			 * counters are zero mid-parse (e.g. "--rxq 0" before
+			 * "--txq 2" is seen). */
+			if (!nb_rxq && !nb_txq) {
+				rte_exit(EXIT_FAILURE, "Either rx or tx queues should "
+						"be non-zero\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "burst")) {
+				n = atoi(optarg);
+				if ((n >= 1) && (n <= MAX_PKT_BURST))
+					nb_pkt_per_burst = (uint16_t) n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "burst must >= 1 and <= %d]",
+						 MAX_PKT_BURST);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "mbcache")) {
+				n = atoi(optarg);
+				if ((n >= 0) &&
+				    (n <= RTE_MEMPOOL_CACHE_MAX_SIZE))
+					mb_mempool_cache = (uint16_t) n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "mbcache must be >= 0 and <= %d\n",
+						 RTE_MEMPOOL_CACHE_MAX_SIZE);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txfreet")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					tx_free_thresh = (int16_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "txfreet must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txrst")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					tx_rs_thresh = (int16_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "txrst must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txqflags")) {
+				char *end = NULL;
+				/* NOTE(review): strtoul returns unsigned long
+				 * but is stored in the signed int n, so large
+				 * hex values can wrap negative and be rejected
+				 * by the n >= 0 test - TODO confirm intent. */
+				n = strtoul(optarg, &end, 16);
+				if (n >= 0)
+					txq_flags = (int32_t)n;
+				else
+					rte_exit(EXIT_FAILURE,
+						 "txqflags must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "rxd")) {
+				n = atoi(optarg);
+				if (n > 0) {
+					/* descriptor count must exceed the
+					 * currently configured free threshold */
+					if (rx_free_thresh >= n)
+						rte_exit(EXIT_FAILURE,
+							 "rxd must be > "
+							 "rx_free_thresh(%d)\n",
+							 (int)rx_free_thresh);
+					else
+						nb_rxd = (uint16_t) n;
+				} else
+					rte_exit(EXIT_FAILURE,
+						 "rxd(%d) invalid - must be > 0\n",
+						 n);
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txd")) {
+				n = atoi(optarg);
+				if (n > 0)
+					nb_txd = (uint16_t) n;
+				else
+					rte_exit(EXIT_FAILURE, "txd must be in > 0\n");
+			}
+			/* prefetch / host / write-back threshold values for
+			 * the TX and RX descriptor rings */
+			if (!strcmp(lgopts[opt_idx].name, "txpt")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					tx_pthresh = (int8_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "txpt must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txht")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					tx_hthresh = (int8_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "txht must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txwt")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					tx_wthresh = (int8_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "txwt must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "rxpt")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					rx_pthresh = (int8_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "rxpt must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "rxht")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					rx_hthresh = (int8_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "rxht must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "rxwt")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					rx_wthresh = (int8_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "rxwt must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "rxfreet")) {
+				n = atoi(optarg);
+				if (n >= 0)
+					rx_free_thresh = (int16_t)n;
+				else
+					rte_exit(EXIT_FAILURE, "rxfreet must be >= 0\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "tx-queue-stats-mapping")) {
+				if (parse_queue_stats_mapping_config(optarg, TX)) {
+					rte_exit(EXIT_FAILURE,
+						 "invalid TX queue statistics mapping config entered\n");
+				}
+			}
+			if (!strcmp(lgopts[opt_idx].name, "rx-queue-stats-mapping")) {
+				if (parse_queue_stats_mapping_config(optarg, RX)) {
+					rte_exit(EXIT_FAILURE,
+						 "invalid RX queue statistics mapping config entered\n");
+				}
+			}
+			if (!strcmp(lgopts[opt_idx].name, "txpkts")) {
+				unsigned seg_lengths[RTE_MAX_SEGS_PER_PKT];
+				unsigned int nb_segs;
+
+				/* "--txpkts a,b,c" defines the segment sizes
+				 * used by the txonly forwarding engine */
+				nb_segs = parse_item_list(optarg, "txpkt segments",
+						RTE_MAX_SEGS_PER_PKT, seg_lengths, 0);
+				if (nb_segs > 0)
+					set_tx_pkt_segments(seg_lengths, nb_segs);
+				else
+					rte_exit(EXIT_FAILURE, "bad txpkts\n");
+			}
+			if (!strcmp(lgopts[opt_idx].name, "no-flush-rx"))
+				no_flush_rx = 1;
+			if (!strcmp(lgopts[opt_idx].name, "disable-link-check"))
+				no_link_check = 1;
+
+			break;
+		case 'h':
+			usage(argv[0]);
+			rte_exit(EXIT_SUCCESS, "Displayed help\n");
+			break;
+		default:
+			usage(argv[0]);
+			rte_exit(EXIT_FAILURE,
+				 "Command line is incomplete or incorrect\n");
+			break;
+		}
+	}
+}
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
new file mode 100644
index 00000000..14555abc
--- /dev/null
+++ b/app/test-pmd/rxonly.c
@@ -0,0 +1,404 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+
+#include "testpmd.h"
+
+/*
+ * Print the string "what" followed by the formatted MAC address
+ * "eth_addr" to stdout, with no trailing newline.
+ */
+static inline void
+print_ether_addr(const char *what, struct ether_addr *eth_addr)
+{
+	char buf[ETHER_ADDR_FMT_SIZE];
+	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+	printf("%s%s", what, buf);
+}
+
+/*
+ * The "rxonly" forwarding engine's packet handler: receive a burst of
+ * packets on the stream's RX queue, optionally dump per-packet details
+ * (addresses, offload flags, decoded packet types, VXLAN header fields
+ * for recognized tunnel packets) when verbose_level > 0, then free
+ * every mbuf. No packets are forwarded.
+ */
+static void
+pkt_burst_receive(struct fwd_stream *fs)
+{
+	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
+	struct rte_mbuf  *mb;
+	struct ether_hdr *eth_hdr;
+	uint16_t eth_type;
+	uint64_t ol_flags;
+	uint16_t nb_rx;
+	uint16_t i, packet_type;
+	uint16_t is_encapsulation;
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	uint64_t start_tsc;
+	uint64_t end_tsc;
+	uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	start_tsc = rte_rdtsc();
+#endif
+
+	/*
+	 * Receive a burst of packets.
+	 */
+	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+				 nb_pkt_per_burst);
+	if (unlikely(nb_rx == 0))
+		return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+	fs->rx_packets += nb_rx;
+
+	/*
+	 * Dump each received packet if verbose_level > 0.
+	 */
+	if (verbose_level > 0)
+		printf("port %u/queue %u: received %u packets\n",
+		       (unsigned) fs->rx_port,
+		       (unsigned) fs->rx_queue,
+		       (unsigned) nb_rx);
+	for (i = 0; i < nb_rx; i++) {
+		mb = pkts_burst[i];
+		/* non-verbose mode: just drop the packet */
+		if (verbose_level == 0) {
+			rte_pktmbuf_free(mb);
+			continue;
+		}
+		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+		eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
+		ol_flags = mb->ol_flags;
+		packet_type = mb->packet_type;
+		is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type);
+
+		print_ether_addr("  src=", &eth_hdr->s_addr);
+		print_ether_addr(" - dst=", &eth_hdr->d_addr);
+		printf(" - type=0x%04x - length=%u - nb_segs=%d",
+		       eth_type, (unsigned) mb->pkt_len,
+		       (int)mb->nb_segs);
+		/* RSS hash vs. flow-director match info share the mbuf's
+		 * hash union, so only one of them is printed */
+		if (ol_flags & PKT_RX_RSS_HASH) {
+			printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
+			printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
+		} else if (ol_flags & PKT_RX_FDIR) {
+			printf(" - FDIR matched ");
+			if (ol_flags & PKT_RX_FDIR_ID)
+				printf("ID=0x%x",
+				       mb->hash.fdir.hi);
+			else if (ol_flags & PKT_RX_FDIR_FLX)
+				printf("flex bytes=0x%08x %08x",
+				       mb->hash.fdir.hi, mb->hash.fdir.lo);
+			else
+				printf("hash=0x%x ID=0x%x ",
+				       mb->hash.fdir.hash, mb->hash.fdir.id);
+		}
+		if (ol_flags & PKT_RX_VLAN_PKT)
+			printf(" - VLAN tci=0x%x", mb->vlan_tci);
+		if (ol_flags & PKT_RX_QINQ_PKT)
+			printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
+					mb->vlan_tci, mb->vlan_tci_outer);
+		/* decode the PMD-reported packet type, one layer at a time:
+		 * outer L2/L3/L4, tunnel, then inner L2/L3/L4 */
+		if (mb->packet_type) {
+			uint32_t ptype;
+
+			/* (outer) L2 packet type */
+			ptype = mb->packet_type & RTE_PTYPE_L2_MASK;
+			switch (ptype) {
+			case RTE_PTYPE_L2_ETHER:
+				printf(" - (outer) L2 type: ETHER");
+				break;
+			case RTE_PTYPE_L2_ETHER_TIMESYNC:
+				printf(" - (outer) L2 type: ETHER_Timesync");
+				break;
+			case RTE_PTYPE_L2_ETHER_ARP:
+				printf(" - (outer) L2 type: ETHER_ARP");
+				break;
+			case RTE_PTYPE_L2_ETHER_LLDP:
+				printf(" - (outer) L2 type: ETHER_LLDP");
+				break;
+			default:
+				printf(" - (outer) L2 type: Unknown");
+				break;
+			}
+
+			/* (outer) L3 packet type */
+			ptype = mb->packet_type & RTE_PTYPE_L3_MASK;
+			switch (ptype) {
+			case RTE_PTYPE_L3_IPV4:
+				printf(" - (outer) L3 type: IPV4");
+				break;
+			case RTE_PTYPE_L3_IPV4_EXT:
+				printf(" - (outer) L3 type: IPV4_EXT");
+				break;
+			case RTE_PTYPE_L3_IPV6:
+				printf(" - (outer) L3 type: IPV6");
+				break;
+			case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
+				printf(" - (outer) L3 type: IPV4_EXT_UNKNOWN");
+				break;
+			case RTE_PTYPE_L3_IPV6_EXT:
+				printf(" - (outer) L3 type: IPV6_EXT");
+				break;
+			case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
+				printf(" - (outer) L3 type: IPV6_EXT_UNKNOWN");
+				break;
+			default:
+				printf(" - (outer) L3 type: Unknown");
+				break;
+			}
+
+			/* (outer) L4 packet type */
+			ptype = mb->packet_type & RTE_PTYPE_L4_MASK;
+			switch (ptype) {
+			case RTE_PTYPE_L4_TCP:
+				printf(" - (outer) L4 type: TCP");
+				break;
+			case RTE_PTYPE_L4_UDP:
+				printf(" - (outer) L4 type: UDP");
+				break;
+			case RTE_PTYPE_L4_FRAG:
+				printf(" - (outer) L4 type: L4_FRAG");
+				break;
+			case RTE_PTYPE_L4_SCTP:
+				printf(" - (outer) L4 type: SCTP");
+				break;
+			case RTE_PTYPE_L4_ICMP:
+				printf(" - (outer) L4 type: ICMP");
+				break;
+			case RTE_PTYPE_L4_NONFRAG:
+				printf(" - (outer) L4 type: L4_NONFRAG");
+				break;
+			default:
+				printf(" - (outer) L4 type: Unknown");
+				break;
+			}
+
+			/* packet tunnel type */
+			ptype = mb->packet_type & RTE_PTYPE_TUNNEL_MASK;
+			switch (ptype) {
+			case RTE_PTYPE_TUNNEL_IP:
+				printf(" - Tunnel type: IP");
+				break;
+			case RTE_PTYPE_TUNNEL_GRE:
+				printf(" - Tunnel type: GRE");
+				break;
+			case RTE_PTYPE_TUNNEL_VXLAN:
+				printf(" - Tunnel type: VXLAN");
+				break;
+			case RTE_PTYPE_TUNNEL_NVGRE:
+				printf(" - Tunnel type: NVGRE");
+				break;
+			case RTE_PTYPE_TUNNEL_GENEVE:
+				printf(" - Tunnel type: GENEVE");
+				break;
+			case RTE_PTYPE_TUNNEL_GRENAT:
+				printf(" - Tunnel type: GRENAT");
+				break;
+			default:
+				printf(" - Tunnel type: Unknown");
+				break;
+			}
+
+			/* inner L2 packet type */
+			ptype = mb->packet_type & RTE_PTYPE_INNER_L2_MASK;
+			switch (ptype) {
+			case RTE_PTYPE_INNER_L2_ETHER:
+				printf(" - Inner L2 type: ETHER");
+				break;
+			case RTE_PTYPE_INNER_L2_ETHER_VLAN:
+				printf(" - Inner L2 type: ETHER_VLAN");
+				break;
+			default:
+				printf(" - Inner L2 type: Unknown");
+				break;
+			}
+
+			/* inner L3 packet type */
+			ptype = mb->packet_type & RTE_PTYPE_INNER_L3_MASK;
+			switch (ptype) {
+			case RTE_PTYPE_INNER_L3_IPV4:
+				printf(" - Inner L3 type: IPV4");
+				break;
+			case RTE_PTYPE_INNER_L3_IPV4_EXT:
+				printf(" - Inner L3 type: IPV4_EXT");
+				break;
+			case RTE_PTYPE_INNER_L3_IPV6:
+				printf(" - Inner L3 type: IPV6");
+				break;
+			case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
+				printf(" - Inner L3 type: IPV4_EXT_UNKNOWN");
+				break;
+			case RTE_PTYPE_INNER_L3_IPV6_EXT:
+				printf(" - Inner L3 type: IPV6_EXT");
+				break;
+			case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
+				printf(" - Inner L3 type: IPV6_EXT_UNKNOWN");
+				break;
+			default:
+				printf(" - Inner L3 type: Unknown");
+				break;
+			}
+
+			/* inner L4 packet type */
+			ptype = mb->packet_type & RTE_PTYPE_INNER_L4_MASK;
+			switch (ptype) {
+			case RTE_PTYPE_INNER_L4_TCP:
+				printf(" - Inner L4 type: TCP");
+				break;
+			case RTE_PTYPE_INNER_L4_UDP:
+				printf(" - Inner L4 type: UDP");
+				break;
+			case RTE_PTYPE_INNER_L4_FRAG:
+				printf(" - Inner L4 type: L4_FRAG");
+				break;
+			case RTE_PTYPE_INNER_L4_SCTP:
+				printf(" - Inner L4 type: SCTP");
+				break;
+			case RTE_PTYPE_INNER_L4_ICMP:
+				printf(" - Inner L4 type: ICMP");
+				break;
+			case RTE_PTYPE_INNER_L4_NONFRAG:
+				printf(" - Inner L4 type: L4_NONFRAG");
+				break;
+			default:
+				printf(" - Inner L4 type: Unknown");
+				break;
+			}
+			printf("\n");
+		} else
+			printf("Unknown packet type\n");
+		/* for tunnel packets, parse down to the VXLAN header;
+		 * only the fixed-size IPv4/IPv6 headers are handled
+		 * (IPv4 options are explicitly unsupported) */
+		if (is_encapsulation) {
+			struct ipv4_hdr *ipv4_hdr;
+			struct ipv6_hdr *ipv6_hdr;
+			struct udp_hdr *udp_hdr;
+			uint8_t l2_len;
+			uint8_t l3_len;
+			uint8_t l4_len;
+			uint8_t l4_proto;
+			struct  vxlan_hdr *vxlan_hdr;
+
+			l2_len  = sizeof(struct ether_hdr);
+
+			/* Do not support ipv4 option field */
+			if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
+				l3_len = sizeof(struct ipv4_hdr);
+				ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
+								   struct ipv4_hdr *,
+								   l2_len);
+				l4_proto = ipv4_hdr->next_proto_id;
+			} else {
+				l3_len = sizeof(struct ipv6_hdr);
+				ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
+								   struct ipv6_hdr *,
+								   l2_len);
+				l4_proto = ipv6_hdr->proto;
+			}
+			if (l4_proto == IPPROTO_UDP) {
+				udp_hdr = rte_pktmbuf_mtod_offset(mb,
+								  struct udp_hdr *,
+								  l2_len + l3_len);
+				l4_len = sizeof(struct udp_hdr);
+				vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
+								    struct vxlan_hdr *,
+								    l2_len + l3_len + l4_len);
+
+				/* VNI occupies the upper 24 bits of vx_vni */
+				printf(" - VXLAN packet: packet type =%d, "
+					"Destination UDP port =%d, VNI = %d",
+					packet_type, RTE_BE_TO_CPU_16(udp_hdr->dst_port),
+					rte_be_to_cpu_32(vxlan_hdr->vx_vni) >> 8);
+			}
+		}
+		printf(" - Receive queue=0x%x", (unsigned) fs->rx_queue);
+		printf("\n");
+		/* print the symbolic name of every set RX offload flag */
+		if (ol_flags != 0) {
+			unsigned rxf;
+			const char *name;
+
+			for (rxf = 0; rxf < sizeof(mb->ol_flags) * 8; rxf++) {
+				if ((ol_flags & (1ULL << rxf)) == 0)
+					continue;
+				name = rte_get_rx_ol_flag_name(1ULL << rxf);
+				if (name == NULL)
+					continue;
+				printf("  %s\n", name);
+			}
+		}
+		rte_pktmbuf_free(mb);
+	}
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	end_tsc = rte_rdtsc();
+	core_cycles = (end_tsc - start_tsc);
+	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/*
+ * Forwarding engine descriptor for "rxonly" mode: no per-port
+ * begin/end hooks, packets are received (and freed) by
+ * pkt_burst_receive() without being forwarded.
+ */
+struct fwd_engine rx_only_engine = {
+	.fwd_mode_name  = "rxonly",
+	.port_fwd_begin = NULL,
+	.port_fwd_end   = NULL,
+	.packet_fwd     = pkt_burst_receive,
+};
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
new file mode 100644
index 00000000..26a174c1
--- /dev/null
+++ b/app/test-pmd/testpmd.c
@@ -0,0 +1,2097 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+#include <rte_string_fns.h>
+#ifdef RTE_LIBRTE_PMD_XENVIRT
+#include <rte_eth_xenvirt.h>
+#endif
+
+#include "testpmd.h"
+#include "mempool_osdep.h"
+
+uint16_t verbose_level = 0; /**< Silent by default. */
+
+/* use master core for command line ? */
+uint8_t interactive = 0;
+uint8_t auto_start = 0;
+
+/*
+ * NUMA support configuration.
+ * When set, the NUMA support attempts to dispatch the allocation of the
+ * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
+ * probed ports among the CPU sockets 0 and 1.
+ * Otherwise, all memory is allocated from CPU socket 0.
+ */
+uint8_t numa_support = 0; /**< No numa support by default */
+
+/*
+ * In UMA mode,all memory is allocated from socket 0 if --socket-num is
+ * not configured.
+ */
+uint8_t socket_num = UMA_NO_CONFIG;
+
+/*
+ * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
+ */
+uint8_t mp_anon = 0;
+
+/*
+ * Record the Ethernet address of peer target ports to which packets are
+ * forwarded.
+ * Must be instanciated with the ethernet addresses of peer traffic generator
+ * ports.
+ */
+struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+portid_t nb_peer_eth_addrs = 0;
+
+/*
+ * Probed Target Environment.
+ */
+struct rte_port *ports; /**< For all probed ethernet ports. */
+portid_t nb_ports; /**< Number of probed ethernet ports. */
+struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
+lcoreid_t nb_lcores; /**< Number of probed logical cores. */
+
+/*
+ * Test Forwarding Configuration.
+ * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
+ * nb_fwd_ports <= nb_cfg_ports <= nb_ports
+ */
+lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
+lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
+portid_t nb_cfg_ports; /**< Number of configured ports. */
+portid_t nb_fwd_ports; /**< Number of forwarding ports. */
+
+unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
+portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
+
+struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
+streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
+
+/*
+ * Forwarding engines.
+ */
/* Table of all available forwarding engines, selectable at run time. */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL, /* sentinel: marks the end of the engine list */
};
+
+struct fwd_config cur_fwd_config;
+struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
+
+uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
+uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
+ * specified on command-line. */
+
+/*
+ * Configuration of packet segments used by the "txonly" processing engine.
+ */
+uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
+uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
+ TXONLY_DEF_PACKET_LEN,
+};
+uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
+
+enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
+/**< Split policy for packets to TX. */
+
+uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
+uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
+
+/* current configuration is in DCB or not,0 means it is not in DCB mode */
+uint8_t dcb_config = 0;
+
+/* Whether the dcb is in testing status */
+uint8_t dcb_test = 0;
+
+/*
+ * Configurable number of RX/TX queues.
+ */
+queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
+queueid_t nb_txq = 1; /**< Number of TX queues per port. */
+
+/*
+ * Configurable number of RX/TX ring descriptors.
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
+uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
+
+#define RTE_PMD_PARAM_UNSET -1
+/*
+ * Configurable values of RX and TX ring threshold registers.
+ */
+
+int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
+int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
+int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
+
+int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
+int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
+int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
+
+/*
+ * Configurable value of RX free threshold.
+ */
+int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
+
+/*
+ * Configurable value of RX drop enable.
+ */
+int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
+
+/*
+ * Configurable value of TX free threshold.
+ */
+int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
+
+/*
+ * Configurable value of TX RS bit threshold.
+ */
+int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
+
+/*
+ * Configurable value of TX queue flags.
+ */
+int32_t txq_flags = RTE_PMD_PARAM_UNSET;
+
+/*
+ * Receive Side Scaling (RSS) configuration.
+ */
+uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+
+/*
+ * Port topology configuration
+ */
+uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
+
+/*
+ * Avoids to flush all the RX streams before starts forwarding.
+ */
+uint8_t no_flush_rx = 0; /* flush by default */
+
+/*
+ * Avoids to check link status when starting/stopping a port.
+ */
+uint8_t no_link_check = 0; /* check by default */
+
+/*
+ * NIC bypass mode configuration options.
+ */
+#ifdef RTE_NIC_BYPASS
+
+/* The NIC bypass watchdog timeout. */
+uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
+
+#endif
+
+/*
+ * Ethernet device configuration.
+ */
/* Default RX mode applied to every port; note VLAN filter/strip are ON. */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};
+
/*
 * Default flow-director configuration: disabled, with full (all-ones)
 * match masks on addresses and ports so every field participates in
 * matching once a mode is selected at run time.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
+
+volatile int test_done = 1; /* stop packet forwarding when set to 1. */
+
/* Queue -> stats-register mapping tables; presumably filled from the
 * command-line mapping options — verify against parameters.c. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0; /* number of valid TX map entries */
uint16_t nb_rx_queue_stats_mappings = 0; /* number of valid RX map entries */

/* Highest CPU socket id seen + 1 (set in set_default_fwd_lcores_config). */
unsigned max_socket = 0;
+
+/* Forward function declarations */
+static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
+static void check_all_ports_link_status(uint32_t port_mask);
+
+/*
+ * Check if all the ports are started.
+ * If yes, return positive value. If not, return zero.
+ */
+static int all_ports_started(void);
+
+/*
+ * Find next enabled port
+ */
+portid_t
+find_next_port(portid_t p, struct rte_port *ports, int size)
+{
+ if (ports == NULL)
+ rte_exit(-EINVAL, "failed to find a next port id\n");
+
+ while ((p < size) && (ports[p].enabled == 0))
+ p++;
+ return p;
+}
+
+/*
+ * Setup default configuration.
+ */
+static void
+set_default_fwd_lcores_config(void)
+{
+ unsigned int i;
+ unsigned int nb_lc;
+ unsigned int sock_num;
+
+ nb_lc = 0;
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ sock_num = rte_lcore_to_socket_id(i) + 1;
+ if (sock_num > max_socket) {
+ if (sock_num > RTE_MAX_NUMA_NODES)
+ rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
+ max_socket = sock_num;
+ }
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ if (i == rte_get_master_lcore())
+ continue;
+ fwd_lcores_cpuids[nb_lc++] = i;
+ }
+ nb_lcores = (lcoreid_t) nb_lc;
+ nb_cfg_lcores = nb_lcores;
+ nb_fwd_lcores = 1;
+}
+
+static void
+set_def_peer_eth_addrs(void)
+{
+ portid_t i;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
+ peer_eth_addrs[i].addr_bytes[5] = i;
+ }
+}
+
+static void
+set_default_fwd_ports_config(void)
+{
+ portid_t pt_id;
+
+ for (pt_id = 0; pt_id < nb_ports; pt_id++)
+ fwd_ports_ids[pt_id] = pt_id;
+
+ nb_cfg_ports = nb_ports;
+ nb_fwd_ports = nb_ports;
+}
+
/*
 * Reset the whole forwarding configuration (lcores, peer MAC addresses,
 * ports) to its defaults.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
+
+/*
+ * Configuration initialisation done once at init time.
+ */
/*
 * Create the mbuf pool for one CPU socket.
 *
 * Allocation strategy, in order:
 *   1. Xen grant-table pool (only when RTE_LIBRTE_PMD_XENVIRT is built in);
 *   2. anonymous-memory pool when --mp-anon was requested;
 *   3. regular rte_pktmbuf_pool_create() otherwise.
 * Aborts the application when no pool could be created.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Full per-mbuf footprint: header + data segment. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0)
			rte_mp = mempool_anon_create(pool_name, nb_mbuf,
					mb_size, (unsigned) mb_mempool_cache,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socket_id, 0);
		else
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
+
+/*
+ * Check given socket id is valid or not with NUMA mode,
+ * if valid, return 0, else return -1
+ */
+static int
+check_socket_id(const unsigned int socket_id)
+{
+ static int warning_once = 0;
+
+ if (socket_id >= max_socket) {
+ if (!warning_once && numa_support)
+ printf("Warning: NUMA should be configured manually by"
+ " using --port-numa-config and"
+ " --ring-numa-config parameters along with"
+ " --numa.\n");
+ warning_once = 1;
+ return -1;
+ }
+ return 0;
+}
+
/*
 * One-time configuration at start-up: allocate the per-lcore forwarding
 * contexts, create the mbuf pools (one per socket when NUMA support is
 * on, a single socket-0/--socket-num pool otherwise), query device info
 * for every probed port and mark it for (re)configuration, then build
 * the forwarding streams.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: descriptors in flight + per-lcore caches. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the local one is absent. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
+
+
+void
+reconfig(portid_t new_port_id, unsigned socket_id)
+{
+ struct rte_port *port;
+
+ /* Reconfiguration of Ethernet ports. */
+ port = &ports[new_port_id];
+ rte_eth_dev_info_get(new_port_id, &port->dev_info);
+
+ /* set flag to initialize port/queue */
+ port->need_reconfig = 1;
+ port->need_reconfig_queues = 1;
+ port->socket_id = socket_id;
+
+ init_port_config();
+}
+
+
/*
 * (Re)allocate the forwarding streams: one stream per (port, queue)
 * pair, where the queue count is max(nb_rxq, nb_txq).  Also assigns
 * each port its NUMA socket id.  Returns 0 on success, -1 when the
 * requested queue counts exceed a device's limits or no queues exist.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+static void
+pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
+{
+ unsigned int total_burst;
+ unsigned int nb_burst;
+ unsigned int burst_stats[3];
+ uint16_t pktnb_stats[3];
+ uint16_t nb_pkt;
+ int burst_percent[3];
+
+ /*
+ * First compute the total number of packet bursts and the
+ * two highest numbers of bursts of the same number of packets.
+ */
+ total_burst = 0;
+ burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
+ pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
+ for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
+ nb_burst = pbs->pkt_burst_spread[nb_pkt];
+ if (nb_burst == 0)
+ continue;
+ total_burst += nb_burst;
+ if (nb_burst > burst_stats[0]) {
+ burst_stats[1] = burst_stats[0];
+ pktnb_stats[1] = pktnb_stats[0];
+ burst_stats[0] = nb_burst;
+ pktnb_stats[0] = nb_pkt;
+ }
+ }
+ if (total_burst == 0)
+ return;
+ burst_percent[0] = (burst_stats[0] * 100) / total_burst;
+ printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
+ burst_percent[0], (int) pktnb_stats[0]);
+ if (burst_stats[0] == total_burst) {
+ printf("]\n");
+ return;
+ }
+ if (burst_stats[0] + burst_stats[1] == total_burst) {
+ printf(" + %d%% of %d pkts]\n",
+ 100 - burst_percent[0], pktnb_stats[1]);
+ return;
+ }
+ burst_percent[1] = (burst_stats[1] * 100) / total_burst;
+ burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
+ if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
+ printf(" + %d%% of others]\n", 100 - burst_percent[0]);
+ return;
+ }
+ printf(" + %d%% of %d pkts + %d%% of others]\n",
+ burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
+}
+#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
+
/*
 * Print the forwarding statistics of one port.  Two layouts are used:
 * a wide one when no queue-stats mapping is active, and a compact one
 * otherwise, followed by the mapped per-queue stat registers.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters are only meaningful in csum mode. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
+
/*
 * Print the per-stream counters of one forwarding stream; silent when
 * the stream saw no traffic at all.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Skip idle streams entirely. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
+
+static void
+flush_fwd_rx_queues(void)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ portid_t rxp;
+ portid_t port_id;
+ queueid_t rxq;
+ uint16_t nb_rx;
+ uint16_t i;
+ uint8_t j;
+
+ for (j = 0; j < 2; j++) {
+ for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
+ for (rxq = 0; rxq < nb_rxq; rxq++) {
+ port_id = fwd_ports_ids[rxp];
+ do {
+ nb_rx = rte_eth_rx_burst(port_id, rxq,
+ pkts_burst, MAX_PKT_BURST);
+ for (i = 0; i < nb_rx; i++)
+ rte_pktmbuf_free(pkts_burst[i]);
+ } while (nb_rx > 0);
+ }
+ }
+ rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
+ }
+}
+
+static void
+run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
+{
+ struct fwd_stream **fsm;
+ streamid_t nb_fs;
+ streamid_t sm_id;
+
+ fsm = &fwd_streams[fc->stream_idx];
+ nb_fs = fc->stream_nb;
+ do {
+ for (sm_id = 0; sm_id < nb_fs; sm_id++)
+ (*pkt_fwd)(fsm[sm_id]);
+ } while (! fc->stopped);
+}
+
/*
 * rte_eal_remote_launch() entry point: run the current forwarding
 * engine on the lcore context passed in fwd_arg.  Always returns 0.
 */
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
+
+/*
+ * Run the TXONLY packet forwarding engine to send a single burst of packets.
+ * Used to start communication flows in network loopback test configurations.
+ */
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	/* Work on a local copy with stopped=1 so run_pkt_fwd_on_lcore()
	 * performs exactly one sweep without touching the real lcore state. */
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
+
+/*
+ * Launch packet forwarding:
+ * - Setup per-port forwarding context.
+ * - launch logical cores with their forwarding configuration.
+ */
/*
 * Launch packet forwarding:
 * - Setup per-port forwarding context.
 * - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t  port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	/* Optional per-port setup hook of the current engine. */
	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		/* In interactive mode the master lcore stays on the CLI. */
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
+
+/*
+ * Launch packet forwarding configuration.
+ */
/*
 * Start packet forwarding with the current configuration.
 * Validates that the engine/queue combination is usable and all ports
 * are started, resets all per-port and per-stream counters, optionally
 * fires one txonly burst first (with_tx_first, used for loopback
 * setups), then launches the forwarding lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	rxtx_config_display();

	/* Snapshot port stats so stop_packet_forwarding() can diff them. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset every per-stream counter before the run. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		/* Run one complete txonly pass before the real engine. */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		launch_packet_forwarding(run_one_txonly_burst_on_core);
		rte_eal_mp_wait_lcore();
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
+
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for them, run the engine's per-port end hook, aggregate the
 * per-stream counters into the per-port ones, then print per-stream,
 * per-port and accumulated statistics (port stats are shown as deltas
 * against the snapshot taken in start_packet_forwarding()).
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/* With multiple streams per port, show each stream and drop
		 * the per-port stream back-references; otherwise keep them
		 * so fwd_port_stats_display() can show burst stats. */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Convert absolute counters into per-run deltas. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
+
+void
+dev_set_link_up(portid_t pid)
+{
+	int diag;
+
+	/* Ask the driver to bring the port's link up; warn on failure. */
+	diag = rte_eth_dev_set_link_up((uint8_t)pid);
+	if (diag < 0)
+		printf("\nSet link up fail.\n");
+}
+
+void
+dev_set_link_down(portid_t pid)
+{
+	int diag;
+
+	/* Ask the driver to take the port's link down; warn on failure. */
+	diag = rte_eth_dev_set_link_down((uint8_t)pid);
+	if (diag < 0)
+		printf("\nSet link down fail.\n");
+}
+
+/*
+ * Return 1 when every enabled, non-slave port is in the STARTED state,
+ * 0 otherwise.  Bonding slave ports are ignored.
+ */
+static int
+all_ports_started(void)
+{
+	portid_t pid;
+	struct rte_port *p;
+
+	FOREACH_PORT(pid, ports) {
+		p = &ports[pid];
+		/* bonding slaves are driven by their master: skip them */
+		if (p->slave_flag != 0)
+			continue;
+		if (p->port_status != RTE_PORT_STARTED)
+			return 0;
+	}
+
+	/* every checked port is started */
+	return 1;
+}
+
+/*
+ * Return 1 when every enabled, non-slave port is in the STOPPED state,
+ * 0 otherwise.  Bonding slave ports are ignored.
+ */
+int
+all_ports_stopped(void)
+{
+	portid_t pid;
+	struct rte_port *p;
+
+	FOREACH_PORT(pid, ports) {
+		p = &ports[pid];
+		/* bonding slaves are driven by their master: skip them */
+		if (p->slave_flag != 0)
+			continue;
+		if (p->port_status != RTE_PORT_STOPPED)
+			return 0;
+	}
+
+	return 1;
+}
+
+/* Return 1 when the given port exists and is started, 0 otherwise. */
+int
+port_is_started(portid_t port_id)
+{
+	/* unknown port ids are reported as "not started" */
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return 0;
+
+	return (ports[port_id].port_status == RTE_PORT_STARTED) ? 1 : 0;
+}
+
+/* Return 1 when the given port exists and is closed, 0 otherwise. */
+static int
+port_is_closed(portid_t port_id)
+{
+	/* unknown port ids are reported as "not closed" */
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return 0;
+
+	return (ports[port_id].port_status == RTE_PORT_CLOSED) ? 1 : 0;
+}
+
+/*
+ * Start the port "pid", or every enabled port when pid == RTE_PORT_ALL.
+ * For each port this (re)configures the device and its RX/TX queues when
+ * flagged as needing it, then starts the device.  The port_status field is
+ * moved STOPPED -> HANDLING -> STARTED via atomic compare-and-set so that
+ * concurrent state changes are detected.
+ * Returns 0 on success, -1 on a configuration failure.
+ */
+int
+start_port(portid_t pid)
+{
+	/* -1: no matching port seen; 0: matched but none started; 1: started */
+	int diag, need_check_link_status = -1;
+	portid_t pi;
+	queueid_t qi;
+	struct rte_port *port;
+	struct ether_addr mac_addr;
+
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return -1;
+	}
+
+	if (port_id_is_invalid(pid, ENABLED_WARN))
+		return 0;
+
+	if (init_fwd_streams() < 0) {
+		printf("Fail from init_fwd_streams()\n");
+		return -1;
+	}
+
+	if(dcb_config)
+		dcb_test = 1;
+	FOREACH_PORT(pi, ports) {
+		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
+			continue;
+
+		need_check_link_status = 0;
+		port = &ports[pi];
+		/* claim the port; skip it when it is not currently stopped */
+		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
+						 RTE_PORT_HANDLING) == 0) {
+			printf("Port %d is now not stopped\n", pi);
+			continue;
+		}
+
+		if (port->need_reconfig > 0) {
+			port->need_reconfig = 0;
+
+			printf("Configuring Port %d (socket %u)\n", pi,
+					port->socket_id);
+			/* configure port */
+			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
+						&(port->dev_conf));
+			if (diag != 0) {
+				if (rte_atomic16_cmpset(&(port->port_status),
+				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+					printf("Port %d can not be set back "
+							"to stopped\n", pi);
+				printf("Fail to configure port %d\n", pi);
+				/* try to reconfigure port next time */
+				port->need_reconfig = 1;
+				return -1;
+			}
+		}
+		if (port->need_reconfig_queues > 0) {
+			port->need_reconfig_queues = 0;
+			/* setup tx queues */
+			for (qi = 0; qi < nb_txq; qi++) {
+				/* prefer the per-ring NUMA socket when configured */
+				if ((numa_support) &&
+					(txring_numa[pi] != NUMA_NO_CONFIG))
+					diag = rte_eth_tx_queue_setup(pi, qi,
+						nb_txd,txring_numa[pi],
+						&(port->tx_conf));
+				else
+					diag = rte_eth_tx_queue_setup(pi, qi,
+						nb_txd,port->socket_id,
+						&(port->tx_conf));
+
+				if (diag == 0)
+					continue;
+
+				/* Fail to setup tx queue, return */
+				if (rte_atomic16_cmpset(&(port->port_status),
+							RTE_PORT_HANDLING,
+							RTE_PORT_STOPPED) == 0)
+					printf("Port %d can not be set back "
+							"to stopped\n", pi);
+				printf("Fail to configure port %d tx queues\n", pi);
+				/* try to reconfigure queues next time */
+				port->need_reconfig_queues = 1;
+				return -1;
+			}
+			/* setup rx queues */
+			for (qi = 0; qi < nb_rxq; qi++) {
+				/* prefer the per-ring NUMA socket when configured */
+				if ((numa_support) &&
+					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
+					struct rte_mempool * mp =
+						mbuf_pool_find(rxring_numa[pi]);
+					if (mp == NULL) {
+						printf("Failed to setup RX queue:"
+							"No mempool allocation"
+							"on the socket %d\n",
+							rxring_numa[pi]);
+						return -1;
+					}
+
+					diag = rte_eth_rx_queue_setup(pi, qi,
+					     nb_rxd,rxring_numa[pi],
+					     &(port->rx_conf),mp);
+				}
+				else
+					diag = rte_eth_rx_queue_setup(pi, qi,
+					     nb_rxd,port->socket_id,
+					     &(port->rx_conf),
+					     mbuf_pool_find(port->socket_id));
+
+				if (diag == 0)
+					continue;
+
+
+				/* Fail to setup rx queue, return */
+				if (rte_atomic16_cmpset(&(port->port_status),
+							RTE_PORT_HANDLING,
+							RTE_PORT_STOPPED) == 0)
+					printf("Port %d can not be set back "
+							"to stopped\n", pi);
+				printf("Fail to configure port %d rx queues\n", pi);
+				/* try to reconfigure queues next time */
+				port->need_reconfig_queues = 1;
+				return -1;
+			}
+		}
+		/* start port */
+		if (rte_eth_dev_start(pi) < 0) {
+			printf("Fail to start port %d\n", pi);
+
+			/* Fail to setup rx queue, return */
+			if (rte_atomic16_cmpset(&(port->port_status),
+				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+				printf("Port %d can not be set back to "
+							"stopped\n", pi);
+			continue;
+		}
+
+		if (rte_atomic16_cmpset(&(port->port_status),
+			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
+			printf("Port %d can not be set into started\n", pi);
+
+		/* show the MAC address of the port just started */
+		rte_eth_macaddr_get(pi, &mac_addr);
+		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
+				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
+				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
+				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
+
+		/* at least one port started, need checking link status */
+		need_check_link_status = 1;
+	}
+
+	if (need_check_link_status == 1 && !no_link_check)
+		check_all_ports_link_status(RTE_PORT_ALL);
+	else if (need_check_link_status == 0)
+		printf("Please stop the ports first\n");
+
+	printf("Done\n");
+	return 0;
+}
+
+/*
+ * Stop the port "pid", or every enabled port when pid == RTE_PORT_ALL.
+ * Each started port is moved STARTED -> HANDLING -> STOPPED via atomic
+ * compare-and-set; ports not currently started are silently skipped.
+ * Also leaves DCB test mode, if it was active.
+ */
+void
+stop_port(portid_t pid)
+{
+	portid_t pi;
+	struct rte_port *port;
+	int need_check_link_status = 0;
+
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return;
+	}
+	/* leaving DCB test mode clears the DCB configuration flag too */
+	if (dcb_test) {
+		dcb_test = 0;
+		dcb_config = 0;
+	}
+
+	if (port_id_is_invalid(pid, ENABLED_WARN))
+		return;
+
+	printf("Stopping ports...\n");
+
+	FOREACH_PORT(pi, ports) {
+		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
+			continue;
+
+		port = &ports[pi];
+		/* only ports currently in the STARTED state are stopped */
+		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
+						RTE_PORT_HANDLING) == 0)
+			continue;
+
+		rte_eth_dev_stop(pi);
+
+		if (rte_atomic16_cmpset(&(port->port_status),
+			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
+			printf("Port %d can not be set into stopped\n", pi);
+		need_check_link_status = 1;
+	}
+	if (need_check_link_status && !no_link_check)
+		check_all_ports_link_status(RTE_PORT_ALL);
+
+	printf("Done\n");
+}
+
+/*
+ * Close the port "pid", or every enabled port when pid == RTE_PORT_ALL.
+ * Each stopped port is moved STOPPED -> HANDLING -> CLOSED via atomic
+ * compare-and-set; already-closed and not-stopped ports are skipped with
+ * a message.
+ */
+void
+close_port(portid_t pid)
+{
+	portid_t pi;
+	struct rte_port *port;
+
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return;
+	}
+
+	if (port_id_is_invalid(pid, ENABLED_WARN))
+		return;
+
+	printf("Closing ports...\n");
+
+	FOREACH_PORT(pi, ports) {
+		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
+			continue;
+
+		port = &ports[pi];
+		/* CLOSED -> CLOSED cmpset is a pure state test */
+		if (rte_atomic16_cmpset(&(port->port_status),
+			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
+			printf("Port %d is already closed\n", pi);
+			continue;
+		}
+
+		if (rte_atomic16_cmpset(&(port->port_status),
+			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
+			printf("Port %d is now not stopped\n", pi);
+			continue;
+		}
+
+		rte_eth_dev_close(pi);
+
+		/* the target state here is CLOSED, not STOPPED: report it */
+		if (rte_atomic16_cmpset(&(port->port_status),
+			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
+			printf("Port %d can not be set into closed\n", pi);
+	}
+
+	printf("Done\n");
+}
+
+/*
+ * Hot-plug the device named by "identifier" (PCI address or virtual device
+ * args), enable it, configure it for the current socket, enable promiscuous
+ * mode and rebuild the forwarding port list to include it.
+ */
+void
+attach_port(char *identifier)
+{
+	portid_t i, j, pi = 0;
+
+	printf("Attaching a new port...\n");
+
+	if (identifier == NULL) {
+		printf("Invalid parameters are specified\n");
+		return;
+	}
+
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return;
+	}
+
+	/* rte_eth_dev_attach() fills "pi" with the new port id */
+	if (rte_eth_dev_attach(identifier, &pi))
+		return;
+
+	ports[pi].enabled = 1;
+	reconfig(pi, rte_eth_dev_socket_id(pi));
+	rte_eth_promiscuous_enable(pi);
+
+	nb_ports = rte_eth_dev_count();
+
+	/* rebuild the forwarding port id table from the enabled ports */
+	/* set_default_fwd_ports_config(); */
+	memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
+	i = 0;
+	FOREACH_PORT(j, ports) {
+		fwd_ports_ids[i] = j;
+		i++;
+	}
+	nb_cfg_ports = nb_ports;
+	nb_fwd_ports++;
+
+	ports[pi].port_status = RTE_PORT_STOPPED;
+
+	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
+	printf("Done\n");
+}
+
+/*
+ * Hot-unplug the given port.  The port must already be closed; on success
+ * it is disabled and the forwarding port list is rebuilt without it.
+ */
+void
+detach_port(uint8_t port_id)
+{
+	portid_t i, pi = 0;
+	char name[RTE_ETH_NAME_MAX_LEN];
+
+	printf("Detaching a port...\n");
+
+	if (!port_is_closed(port_id)) {
+		printf("Please close port first\n");
+		return;
+	}
+
+	/* rte_eth_dev_detach() fills "name" with the detached device name */
+	if (rte_eth_dev_detach(port_id, name))
+		return;
+
+	ports[port_id].enabled = 0;
+	nb_ports = rte_eth_dev_count();
+
+	/* rebuild the forwarding port id table from the enabled ports */
+	/* set_default_fwd_ports_config(); */
+	memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
+	i = 0;
+	FOREACH_PORT(pi, ports) {
+		fwd_ports_ids[i] = pi;
+		i++;
+	}
+	nb_cfg_ports = nb_ports;
+	nb_fwd_ports--;
+
+	printf("Port '%s' is detached. Now total ports is %d\n",
+			name, nb_ports);
+	printf("Done\n");
+	return;
+}
+
+/*
+ * Shut testpmd down: stop any running forwarding, then stop and close
+ * every allocated port.  Link-status polling is disabled first so the
+ * shutdown does not block waiting on link state.
+ */
+void
+pmd_test_exit(void)
+{
+	portid_t pt_id;
+
+	/* test_done == 0 means forwarding is still running */
+	if (test_done == 0)
+		stop_packet_forwarding();
+
+	if (ports != NULL) {
+		no_link_check = 1;
+		FOREACH_PORT(pt_id, ports) {
+			printf("\nShutting down port %d...\n", pt_id);
+			fflush(stdout);
+			stop_port(pt_id);
+			close_port(pt_id);
+		}
+	}
+	printf("\nBye...\n");
+}
+
+/* Signature of a parameterless test-menu command handler. */
+typedef void (*cmd_func_t)(void);
+/* A named command of the (non-interactive) test menu. */
+struct pmd_test_command {
+	const char *cmd_name;
+	cmd_func_t cmd_func;
+};
+
+/* NOTE(review): "pmd_test_menu" is not defined in this file section --
+ * this macro appears unused here; verify before relying on it. */
+#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+	uint8_t portid, count, all_ports_up, print_flag = 0;
+	struct rte_eth_link link;
+
+	printf("Checking link statuses...\n");
+	fflush(stdout);
+	for (count = 0; count <= MAX_CHECK_TIME; count++) {
+		all_ports_up = 1;
+		FOREACH_PORT(portid, ports) {
+			if ((port_mask & (1 << portid)) == 0)
+				continue;
+			memset(&link, 0, sizeof(link));
+			rte_eth_link_get_nowait(portid, &link);
+			/* print link status if flag set */
+			if (print_flag == 1) {
+				if (link.link_status)
+					/* the format string already ends with
+					 * '\n'; the duplex operand must not
+					 * embed another newline */
+					printf("Port %d Link Up - speed %u "
+						"Mbps - %s\n", (uint8_t)portid,
+						(unsigned)link.link_speed,
+				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+					("full-duplex") : ("half-duplex"));
+				else
+					printf("Port %d Link Down\n",
+						(uint8_t)portid);
+				continue;
+			}
+			/* clear all_ports_up flag if any link down */
+			if (link.link_status == ETH_LINK_DOWN) {
+				all_ports_up = 0;
+				break;
+			}
+		}
+		/* after finally printing all link status, get out */
+		if (print_flag == 1)
+			break;
+
+		if (all_ports_up == 0) {
+			fflush(stdout);
+			rte_delay_ms(CHECK_INTERVAL);
+		}
+
+		/* set the print_flag if all ports up or timeout */
+		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+			print_flag = 1;
+		}
+	}
+}
+
+/*
+ * Program the per-queue TX stats counter mappings configured for this
+ * port.  Returns 0 on success (also when no mapping matched) or the
+ * error code of the first failing rte_eth_dev_set_tx_queue_stats_mapping()
+ * call.  Sets port->tx_queue_stats_mapping_enabled when at least one
+ * mapping was applied.
+ */
+static int
+set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
+{
+	uint16_t idx;
+	int ret;
+	uint8_t found = 0;
+
+	for (idx = 0; idx < nb_tx_queue_stats_mappings; idx++) {
+		/* only entries for this port with an in-range queue id */
+		if (tx_queue_stats_mappings[idx].port_id != port_id)
+			continue;
+		if (tx_queue_stats_mappings[idx].queue_id >= nb_txq)
+			continue;
+		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
+				tx_queue_stats_mappings[idx].queue_id,
+				tx_queue_stats_mappings[idx].stats_counter_id);
+		if (ret != 0)
+			return ret;
+		found = 1;
+	}
+
+	if (found)
+		port->tx_queue_stats_mapping_enabled = 1;
+	return 0;
+}
+
+/*
+ * Program the per-queue RX stats counter mappings configured for this
+ * port.  Returns 0 on success (also when no mapping matched) or the
+ * error code of the first failing rte_eth_dev_set_rx_queue_stats_mapping()
+ * call.  Sets port->rx_queue_stats_mapping_enabled when at least one
+ * mapping was applied.
+ */
+static int
+set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
+{
+	uint16_t idx;
+	int ret;
+	uint8_t found = 0;
+
+	for (idx = 0; idx < nb_rx_queue_stats_mappings; idx++) {
+		/* only entries for this port with an in-range queue id */
+		if (rx_queue_stats_mappings[idx].port_id != port_id)
+			continue;
+		if (rx_queue_stats_mappings[idx].queue_id >= nb_rxq)
+			continue;
+		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
+				rx_queue_stats_mappings[idx].queue_id,
+				rx_queue_stats_mappings[idx].stats_counter_id);
+		if (ret != 0)
+			return ret;
+		found = 1;
+	}
+
+	if (found)
+		port->rx_queue_stats_mapping_enabled = 1;
+	return 0;
+}
+
+/*
+ * Apply the TX and RX queue-stats mappings to the given port.  -ENOTSUP
+ * from the driver is tolerated (the feature is simply disabled for the
+ * port); any other failure aborts testpmd.
+ */
+static void
+map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
+{
+	int ret;
+
+	ret = set_tx_queue_stats_mapping_registers(pi, port);
+	if (ret == -ENOTSUP) {
+		port->tx_queue_stats_mapping_enabled = 0;
+		printf("TX queue stats mapping not supported port id=%d\n", pi);
+	} else if (ret != 0) {
+		rte_exit(EXIT_FAILURE,
+				"set_tx_queue_stats_mapping_registers "
+				"failed for port id=%d diag=%d\n",
+				pi, ret);
+	}
+
+	ret = set_rx_queue_stats_mapping_registers(pi, port);
+	if (ret == -ENOTSUP) {
+		port->rx_queue_stats_mapping_enabled = 0;
+		printf("RX queue stats mapping not supported port id=%d\n", pi);
+	} else if (ret != 0) {
+		rte_exit(EXIT_FAILURE,
+				"set_rx_queue_stats_mapping_registers "
+				"failed for port id=%d diag=%d\n",
+				pi, ret);
+	}
+}
+
+/*
+ * Initialize the port's RX/TX queue configuration from the driver
+ * defaults, then override each threshold/flag for which a command-line
+ * parameter was supplied (RTE_PMD_PARAM_UNSET means "keep the default").
+ */
+static void
+rxtx_port_config(struct rte_port *port)
+{
+	port->rx_conf = port->dev_info.default_rxconf;
+	port->tx_conf = port->dev_info.default_txconf;
+
+	/* Check if any RX/TX parameters have been passed */
+	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
+		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
+
+	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
+		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
+
+	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
+		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
+
+	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
+		port->rx_conf.rx_free_thresh = rx_free_thresh;
+
+	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
+		port->rx_conf.rx_drop_en = rx_drop_en;
+
+	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
+		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
+
+	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
+		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
+
+	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
+		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
+
+	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
+		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
+
+	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
+		port->tx_conf.tx_free_thresh = tx_free_thresh;
+
+	if (txq_flags != RTE_PMD_PARAM_UNSET)
+		port->tx_conf.txq_flags = txq_flags;
+}
+
+/*
+ * Build the default device configuration for every enabled port:
+ * RX mode, flow-director settings, the RSS/VMDq multi-queue mode
+ * (depending on the number of RX queues and VFs), queue thresholds
+ * and queue-stats mappings.
+ */
+void
+init_port_config(void)
+{
+	portid_t pid;
+	struct rte_port *port;
+
+	FOREACH_PORT(pid, ports) {
+		port = &ports[pid];
+		port->dev_conf.rxmode = rx_mode;
+		port->dev_conf.fdir_conf = fdir_conf;
+		/* RSS only makes sense with more than one RX queue */
+		if (nb_rxq > 1) {
+			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
+		} else {
+			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
+		}
+
+		/* without DCB and VFs: plain RSS (or no multi-queue at all) */
+		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
+			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
+				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+			else
+				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+		}
+
+		/* with VFs present: use the VMDq variants */
+		if (port->dev_info.max_vfs != 0) {
+			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
+				port->dev_conf.rxmode.mq_mode =
+					ETH_MQ_RX_VMDQ_RSS;
+			else
+				port->dev_conf.rxmode.mq_mode =
+					ETH_MQ_RX_NONE;
+
+			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
+		}
+
+		rxtx_port_config(port);
+
+		rte_eth_macaddr_get(pid, &port->eth_addr);
+
+		map_port_queue_stats_mapping_registers(pid, port);
+#ifdef RTE_NIC_BYPASS
+		rte_eth_dev_bypass_init(pid);
+#endif
+	}
+}
+
+/* Mark the given port as a bonding slave. */
+void set_port_slave_flag(portid_t slave_pid)
+{
+	ports[slave_pid].slave_flag = 1;
+}
+
+/* Clear the bonding-slave mark from the given port. */
+void clear_port_slave_flag(portid_t slave_pid)
+{
+	ports[slave_pid].slave_flag = 0;
+}
+
+/* VLAN ids used to populate the VMDq+DCB pool map in get_eth_dcb_conf(). */
+const uint16_t vlan_tags[] = {
+	0,  1,  2,  3,  4,  5,  6,  7,
+	8,  9, 10, 11,  12, 13, 14, 15,
+	16, 17, 18, 19, 20, 21, 22, 23,
+	24, 25, 26, 27, 28, 29, 30, 31
+};
+
+/*
+ * Fill "eth_conf" with a DCB configuration for the requested mode:
+ * VMDq+DCB (per-VLAN pools) when dcb_mode == DCB_VT_ENABLED, otherwise
+ * plain DCB with RSS.  "num_tcs" selects the number of traffic classes
+ * and "pfc_en" additionally enables priority flow control.
+ * Always returns 0.
+ */
+static int
+get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+		 enum dcb_mode_enable dcb_mode,
+		 enum rte_eth_nb_tcs num_tcs,
+		 uint8_t pfc_en)
+{
+	uint8_t i;
+
+	/*
+	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
+	 * given above, and the number of traffic classes available for use.
+	 */
+	if (dcb_mode == DCB_VT_ENABLED) {
+		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
+		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+
+		/* VMDQ+DCB RX and TX configrations */
+		vmdq_rx_conf->enable_default_pool = 0;
+		vmdq_rx_conf->default_pool = 0;
+		/* 4 TCs allow 32 pools, 8 TCs only 16 */
+		vmdq_rx_conf->nb_queue_pools =
+			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+		vmdq_tx_conf->nb_queue_pools =
+			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+
+		/* map each VLAN tag onto one pool, round-robin */
+		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
+		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
+			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
+			vmdq_rx_conf->pool_map[i].pools =
+				1 << (i % vmdq_rx_conf->nb_queue_pools);
+		}
+		/* identity mapping of user priority -> traffic class */
+		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			vmdq_rx_conf->dcb_tc[i] = i;
+			vmdq_tx_conf->dcb_tc[i] = i;
+		}
+
+		/* set DCB mode of RX and TX of multiple queues */
+		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
+		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+	} else {
+		struct rte_eth_dcb_rx_conf *rx_conf =
+				&eth_conf->rx_adv_conf.dcb_rx_conf;
+		struct rte_eth_dcb_tx_conf *tx_conf =
+				&eth_conf->tx_adv_conf.dcb_tx_conf;
+
+		rx_conf->nb_tcs = num_tcs;
+		tx_conf->nb_tcs = num_tcs;
+
+		/* identity mapping of user priority -> traffic class */
+		for (i = 0; i < num_tcs; i++) {
+			rx_conf->dcb_tc[i] = i;
+			tx_conf->dcb_tc[i] = i;
+		}
+		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
+		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+	}
+
+	if (pfc_en)
+		eth_conf->dcb_capability_en =
+				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+	else
+		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+
+	return 0;
+}
+
+/*
+ * Put the given port into DCB mode: choose the RX/TX queue counts for
+ * the selected mode, build the DCB device configuration, install the
+ * VLAN filters for the pool-map tags and flag the port as DCB-enabled.
+ * Returns 0 on success, a negative value on failure.
+ */
+int
+init_port_dcb_config(portid_t pid,
+		     enum dcb_mode_enable dcb_mode,
+		     enum rte_eth_nb_tcs num_tcs,
+		     uint8_t pfc_en)
+{
+	struct rte_eth_conf port_conf;
+	struct rte_eth_dev_info dev_info;
+	struct rte_port *rte_port;
+	int retval;
+	uint16_t i;
+
+	rte_eth_dev_info_get(pid, &dev_info);
+
+	/* If dev_info.vmdq_pool_base is greater than 0,
+	 * the queue id of vmdq pools is started after pf queues.
+	 */
+	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
+		printf("VMDQ_DCB multi-queue mode is nonsensical"
+			" for port %d.", pid);
+		return -1;
+	}
+
+	/* Assume the ports in testpmd have the same dcb capability
+	 * and has the same number of rxq and txq in dcb mode
+	 */
+	if (dcb_mode == DCB_VT_ENABLED) {
+		nb_rxq = dev_info.max_rx_queues;
+		nb_txq = dev_info.max_tx_queues;
+	} else {
+		/*if vt is disabled, use all pf queues */
+		if (dev_info.vmdq_pool_base == 0) {
+			nb_rxq = dev_info.max_rx_queues;
+			nb_txq = dev_info.max_tx_queues;
+		} else {
+			nb_rxq = (queueid_t)num_tcs;
+			nb_txq = (queueid_t)num_tcs;
+
+		}
+	}
+	rx_free_thresh = 64;
+
+	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+	/* Enter DCB configuration status */
+	dcb_config = 1;
+
+	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
+	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
+	if (retval < 0)
+		return retval;
+
+	rte_port = &ports[pid];
+	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
+
+	rxtx_port_config(rte_port);
+	/* VLAN filter */
+	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
+	/* accept exactly the VLAN tags mapped in get_eth_dcb_conf() */
+	for (i = 0; i < RTE_DIM(vlan_tags); i++)
+		rx_vft_set(pid, vlan_tags[i], 1);
+
+	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
+	map_port_queue_stats_mapping_registers(pid, rte_port);
+
+	rte_port->dcb_flag = 1;
+
+	return 0;
+}
+
+/*
+ * Allocate the zero-initialized table of per-port contexts and mark the
+ * first nb_ports entries (the probed devices) as enabled.  Aborts on
+ * allocation failure.
+ */
+static void
+init_port(void)
+{
+	portid_t pt_id;
+
+	/* Configuration of Ethernet ports. */
+	ports = rte_zmalloc("testpmd: ports",
+			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
+			    RTE_CACHE_LINE_SIZE);
+	if (ports == NULL)
+		rte_exit(EXIT_FAILURE,
+				"rte_zmalloc(%d struct rte_port) failed\n",
+				RTE_MAX_ETHPORTS);
+
+	/* enabled allocated ports */
+	for (pt_id = 0; pt_id < nb_ports; pt_id++)
+		ports[pt_id].enabled = 1;
+}
+
+/* Tear everything down and make the interactive prompt exit. */
+static void
+force_quit(void)
+{
+	pmd_test_exit();
+	prompt_exit();
+}
+
+/*
+ * SIGINT/SIGTERM handler: shut testpmd down cleanly, then re-raise the
+ * signal with the default disposition so the process exits with the
+ * conventional status for that signal.
+ */
+static void
+signal_handler(int signum)
+{
+	if (signum != SIGINT && signum != SIGTERM)
+		return;
+
+	printf("\nSignal %d received, preparing to exit...\n",
+			signum);
+	force_quit();
+	/* exit with the expected status */
+	signal(signum, SIG_DFL);
+	kill(getpid(), signum);
+}
+
+/*
+ * testpmd entry point: initialize the EAL, allocate the port table,
+ * parse the application arguments, configure and start all ports, then
+ * either enter the interactive command line or run packet forwarding
+ * until the user presses enter.
+ */
+int
+main(int argc, char** argv)
+{
+	int diag;
+	uint8_t port_id;
+
+	signal(SIGINT, signal_handler);
+	signal(SIGTERM, signal_handler);
+
+	diag = rte_eal_init(argc, argv);
+	if (diag < 0)
+		rte_panic("Cannot init EAL\n");
+
+	nb_ports = (portid_t) rte_eth_dev_count();
+	if (nb_ports == 0)
+		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
+
+	/* allocate port structures, and init them */
+	init_port();
+
+	set_def_fwd_config();
+	if (nb_lcores == 0)
+		rte_panic("Empty set of forwarding logical cores - check the "
+			  "core mask supplied in the command parameters\n");
+
+	/* rte_eal_init() consumed "diag" arguments; the rest are ours */
+	argc -= diag;
+	argv += diag;
+	if (argc > 1)
+		launch_args_parse(argc, argv);
+
+	if (!nb_rxq && !nb_txq)
+		printf("Warning: Either rx or tx queues should be non-zero\n");
+
+	if (nb_rxq > 1 && nb_rxq > nb_txq)
+		printf("Warning: nb_rxq=%d enables RSS configuration, "
+		       "but nb_txq=%d will prevent to fully test it.\n",
+		       nb_rxq, nb_txq);
+
+	init_config();
+	if (start_port(RTE_PORT_ALL) != 0)
+		rte_exit(EXIT_FAILURE, "Start ports failed\n");
+
+	/* set all ports to promiscuous mode by default */
+	FOREACH_PORT(port_id, ports)
+		rte_eth_promiscuous_enable(port_id);
+
+#ifdef RTE_LIBRTE_CMDLINE
+	if (interactive == 1) {
+		if (auto_start) {
+			printf("Start automatic packet forwarding\n");
+			start_packet_forwarding(0);
+		}
+		prompt();
+	} else
+#endif
+	{
+		char c;
+		int rc;
+
+		printf("No commandline core given, start packet forwarding\n");
+		start_packet_forwarding(0);
+		printf("Press enter to exit\n");
+		/* block until the user hits enter (or stdin closes) */
+		rc = read(0, &c, 1);
+		pmd_test_exit();
+		if (rc < 0)
+			return 1;
+	}
+
+	return 0;
+}
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
new file mode 100644
index 00000000..0f72ca1f
--- /dev/null
+++ b/app/test-pmd/testpmd.h
@@ -0,0 +1,603 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TESTPMD_H_
+#define _TESTPMD_H_
+
+/* Wildcard port id meaning "apply to every port". */
+#define RTE_PORT_ALL (~(portid_t)0x0)
+
+/* Upper bounds on per-queue descriptor ring sizes. */
+#define RTE_TEST_RX_DESC_MAX 2048
+#define RTE_TEST_TX_DESC_MAX 2048
+
+/* Values of struct rte_port.port_status (see its state machine in
+ * start_port()/stop_port()/close_port()). */
+#define RTE_PORT_STOPPED (uint16_t)0
+#define RTE_PORT_STARTED (uint16_t)1
+#define RTE_PORT_CLOSED (uint16_t)2
+#define RTE_PORT_HANDLING (uint16_t)3
+
+/*
+ * Default size of the mbuf data buffer to receive standard 1518-byte
+ * Ethernet frames in a mono-segment memory buffer.
+ */
+#define DEFAULT_MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
+/**< Default size of mbuf data buffer. */
+
+/*
+ * The maximum number of segments per packet is used when creating
+ * scattered transmit packets composed of a list of mbufs.
+ */
+#define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is a 8-bit unsigned char. */
+
+/* Packet-burst sizes: hard maximum and default per RX/TX call. */
+#define MAX_PKT_BURST 512
+#define DEF_PKT_BURST 32
+
+/* Default per-lcore mempool cache size. */
+#define DEF_MBUF_CACHE 250
+
+/* Round "size" up to a multiple of the CPU cache-line size. */
+#define RTE_CACHE_LINE_SIZE_ROUNDUP(size) \
+	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
+
+/* Sentinel socket id meaning "no NUMA constraint configured". */
+#define NUMA_NO_CONFIG 0xFF
+#define UMA_NO_CONFIG 0xFF
+
+/* Compact id types for lcores, ports, queues and forwarding streams. */
+typedef uint8_t  lcoreid_t;
+typedef uint8_t  portid_t;
+typedef uint16_t queueid_t;
+typedef uint16_t streamid_t;
+
+#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
+
+/* How RX ports are paired with TX ports when building fwd streams. */
+enum {
+	PORT_TOPOLOGY_PAIRED,
+	PORT_TOPOLOGY_CHAINED,
+	PORT_TOPOLOGY_LOOP,
+};
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+/**
+ * The data structure associated with RX and TX packet burst statistics
+ * that are recorded for each forwarding stream.
+ */
+struct pkt_burst_stats {
+	/* histogram: index = burst size, value = number of bursts seen */
+	unsigned int pkt_burst_spread[MAX_PKT_BURST];
+};
+#endif
+
+/**
+ * The data structure associated with a forwarding stream between a receive
+ * port/queue and a transmit port/queue.
+ */
+struct fwd_stream {
+	/* "read-only" data */
+	portid_t   rx_port;   /**< port to poll for received packets */
+	queueid_t  rx_queue;  /**< RX queue to poll on "rx_port" */
+	portid_t   tx_port;   /**< forwarding port of received packets */
+	queueid_t  tx_queue;  /**< TX queue to send forwarded packets */
+	streamid_t peer_addr; /**< index of peer ethernet address of packets */
+
+	/* "read-write" results */
+	unsigned int rx_packets;  /**< received packets */
+	unsigned int tx_packets;  /**< received packets transmitted */
+	unsigned int fwd_dropped; /**< received packets not forwarded */
+	unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */
+	unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+	uint64_t     core_cycles; /**< used for RX and TX processing */
+#endif
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+	struct pkt_burst_stats rx_burst_stats;
+	struct pkt_burst_stats tx_burst_stats;
+#endif
+};
+
+/* Bit flags for struct rte_port.tx_ol_flags (TX offload selection). */
+/** Offload IP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_IP_CKSUM          0x0001
+/** Offload UDP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_UDP_CKSUM         0x0002
+/** Offload TCP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_TCP_CKSUM         0x0004
+/** Offload SCTP checksum in csum forward engine */
+#define TESTPMD_TX_OFFLOAD_SCTP_CKSUM        0x0008
+/** Offload outer IP checksum in csum forward engine for recognized tunnels */
+#define TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM    0x0010
+/** Parse tunnel in csum forward engine. If set, dissect tunnel headers
+ * of rx packets. If not set, treat inner headers as payload. */
+#define TESTPMD_TX_OFFLOAD_PARSE_TUNNEL      0x0020
+/** Insert VLAN header in forward engine */
+#define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
+/** Insert double VLAN header in forward engine */
+#define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+
+/**
+ * The data structure associated with each port.
+ */
+struct rte_port {
+	uint8_t                 enabled;    /**< Port enabled or not */
+	struct rte_eth_dev_info dev_info;   /**< PCI info + driver name */
+	struct rte_eth_conf     dev_conf;   /**< Port configuration. */
+	struct ether_addr       eth_addr;   /**< Port ethernet address */
+	struct rte_eth_stats    stats;      /**< Last port statistics */
+	uint64_t                tx_dropped; /**< If no descriptor in TX ring */
+	struct fwd_stream       *rx_stream; /**< Port RX stream, if unique */
+	struct fwd_stream       *tx_stream; /**< Port TX stream, if unique */
+	unsigned int            socket_id;  /**< For NUMA support */
+	uint16_t                tx_ol_flags;/**< TX Offload Flags (TESTPMD_TX_OFFLOAD...). */
+	uint16_t                tso_segsz;  /**< MSS for segmentation offload. */
+	uint16_t                tx_vlan_id;/**< The tag ID */
+	uint16_t                tx_vlan_id_outer;/**< The outer tag ID */
+	void                    *fwd_ctx;   /**< Forwarding mode context */
+	uint64_t                rx_bad_ip_csum; /**< rx pkts with bad ip checksum  */
+	uint64_t                rx_bad_l4_csum; /**< rx pkts with bad l4 checksum */
+	uint8_t                 tx_queue_stats_mapping_enabled;
+	uint8_t                 rx_queue_stats_mapping_enabled;
+	volatile uint16_t        port_status;    /**< port started or not */
+	uint8_t                 need_reconfig;  /**< need reconfiguring port or not */
+	uint8_t                 need_reconfig_queues; /**< need reconfiguring queues or not */
+	uint8_t                 rss_flag;   /**< enable rss or not */
+	uint8_t                 dcb_flag;   /**< enable dcb */
+	struct rte_eth_rxconf   rx_conf;    /**< rx configuration */
+	struct rte_eth_txconf   tx_conf;    /**< tx configuration */
+	struct ether_addr       *mc_addr_pool; /**< pool of multicast addrs */
+	uint32_t                mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
+	uint8_t                 slave_flag; /**< bonding slave port */
+};
+
+/* Return the id of the first enabled port at index >= p, or "size"
+ * when there is none (defined in testpmd.c). */
+extern portid_t __rte_unused
+find_next_port(portid_t p, struct rte_port *ports, int size);
+
+/* Iterate "p" over every enabled entry of the port table. */
+#define FOREACH_PORT(p, ports) \
+	for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
+	    p < RTE_MAX_ETHPORTS; \
+	    p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
+
+/**
+ * The data structure associated with each forwarding logical core.
+ * The logical cores are internally numbered by a core index from 0 to
+ * the maximum number of logical cores - 1.
+ * The system CPU identifier of all logical cores are setup in a global
+ * CPU id. configuration table.
+ */
+struct fwd_lcore {
+	struct rte_mempool *mbp; /**< The mbuf pool to use by this core */
+	streamid_t stream_idx;   /**< index of 1st stream in "fwd_streams" */
+	streamid_t stream_nb;    /**< number of streams in "fwd_streams" */
+	lcoreid_t  cpuid_idx;    /**< index of  logical core in CPU id table */
+	queueid_t  tx_queue;     /**< TX queue to send forwarded packets */
+	volatile char stopped;   /**< stop forwarding when set */
+};
+
+/*
+ * Forwarding mode operations:
+ *   - IO forwarding mode (default mode)
+ *     Forwards packets unchanged.
+ *
+ *   - MAC forwarding mode
+ *     Set the source and the destination Ethernet addresses of packets
+ *     before forwarding them.
+ *
+ *   - IEEE1588 forwarding mode
+ *     Check that received IEEE1588 Precise Time Protocol (PTP) packets are
+ *     filtered and timestamped by the hardware.
+ *     Forwards packets unchanged on the same port.
+ *     Check that sent IEEE1588 PTP packets are timestamped by the hardware.
+ */
+/* Per-port setup/teardown hooks and the per-stream forwarding function. */
+typedef void (*port_fwd_begin_t)(portid_t pi);
+typedef void (*port_fwd_end_t)(portid_t pi);
+typedef void (*packet_fwd_t)(struct fwd_stream *fs);
+
+struct fwd_engine {
+	const char       *fwd_mode_name; /**< Forwarding mode name. */
+	port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */
+	port_fwd_end_t   port_fwd_end;   /**< NULL if nothing special to do. */
+	packet_fwd_t     packet_fwd;     /**< Mandatory. */
+};
+
+extern struct fwd_engine io_fwd_engine;
+extern struct fwd_engine mac_fwd_engine;
+extern struct fwd_engine mac_retry_fwd_engine;
+extern struct fwd_engine mac_swap_engine;
+extern struct fwd_engine flow_gen_engine;
+extern struct fwd_engine rx_only_engine;
+extern struct fwd_engine tx_only_engine;
+extern struct fwd_engine csum_fwd_engine;
+extern struct fwd_engine icmp_echo_engine;
+#ifdef RTE_LIBRTE_IEEE1588
+extern struct fwd_engine ieee1588_fwd_engine;
+#endif
+
+extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */
+
/**
 * Forwarding Configuration
 *
 * Snapshot of the currently applied forwarding setup: which engine is
 * active and how many streams, lcores and ports take part in it.
 */
struct fwd_config {
	struct fwd_engine *fwd_eng; /**< Packet forwarding mode. */
	streamid_t nb_fwd_streams;  /**< Nb. of forward streams to process. */
	lcoreid_t  nb_fwd_lcores;   /**< Nb. of logical cores to launch. */
	portid_t   nb_fwd_ports;    /**< Nb. of ports involved. */
};
+
/**
 * DCB mode enable
 */
enum dcb_mode_enable
{
	DCB_VT_ENABLED,	/**< DCB combined with virtualization (VT). */
	DCB_ENABLED	/**< Plain DCB, no virtualization. */
};
+
#define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32 tx_queues/port */
#define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */

/* Binds one (port, queue) pair to a hardware per-queue stats counter. */
struct queue_stats_mappings {
	uint8_t  port_id;          /* port the mapping applies to */
	uint16_t queue_id;         /* queue index within that port */
	uint8_t  stats_counter_id; /* HW stats register slot to use */
} __rte_cache_aligned;
+
+extern struct queue_stats_mappings tx_queue_stats_mappings_array[];
+extern struct queue_stats_mappings rx_queue_stats_mappings_array[];
+
+/* Assign both tx and rx queue stats mappings to the same default values */
+extern struct queue_stats_mappings *tx_queue_stats_mappings;
+extern struct queue_stats_mappings *rx_queue_stats_mappings;
+
+extern uint16_t nb_tx_queue_stats_mappings;
+extern uint16_t nb_rx_queue_stats_mappings;
+
+/* globals used for configuration */
+extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
+extern uint8_t interactive;
+extern uint8_t auto_start;
+extern uint8_t numa_support; /**< set by "--numa" parameter */
+extern uint16_t port_topology; /**< set by "--port-topology" parameter */
+extern uint8_t no_flush_rx; /**<set by "--no-flush-rx" parameter */
+extern uint8_t mp_anon; /**< set by "--mp-anon" parameter */
+extern uint8_t no_link_check; /**<set by "--disable-link-check" parameter */
+extern volatile int test_done; /* stop packet forwarding when set to 1. */
+
+#ifdef RTE_NIC_BYPASS
+extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
+#endif
+
/*
 * NOTE(review): the three arrays below are defined without "extern" in this
 * header, so every translation unit including it emits a tentative
 * definition; linking then relies on common-symbol merging.  Consider
 * "extern" declarations here plus one definition in testpmd.c — TODO
 * confirm no TU depends on the current behavior before changing.
 */
/*
 * Store specified sockets on which memory pool to be used by ports
 * is allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store specified sockets on which RX ring to be used by ports
 * is allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store specified sockets on which TX ring to be used by ports
 * is allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];
+
+extern uint8_t socket_num;
+
+/*
+ * Configuration of logical cores:
+ * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
+ */
+extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */
+extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
+extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
+extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE];
+extern unsigned max_socket;
+
+/*
+ * Configuration of Ethernet ports:
+ * nb_fwd_ports <= nb_cfg_ports <= nb_ports
+ */
+extern portid_t nb_ports; /**< Number of ethernet ports probed at init time. */
+extern portid_t nb_cfg_ports; /**< Number of configured ports. */
+extern portid_t nb_fwd_ports; /**< Number of forwarding ports. */
+extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];
+extern struct rte_port *ports;
+
+extern struct rte_eth_rxmode rx_mode;
+extern uint64_t rss_hf;
+
+extern queueid_t nb_rxq;
+extern queueid_t nb_txq;
+
+extern uint16_t nb_rxd;
+extern uint16_t nb_txd;
+
+extern int16_t rx_free_thresh;
+extern int8_t rx_drop_en;
+extern int16_t tx_free_thresh;
+extern int16_t tx_rs_thresh;
+extern int32_t txq_flags;
+
+extern uint8_t dcb_config;
+extern uint8_t dcb_test;
+extern enum dcb_queue_mapping_mode dcb_q_mapping;
+
+extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
+extern uint32_t param_total_num_mbufs;
+
+extern struct rte_fdir_conf fdir_conf;
+
+/*
+ * Configuration of packet segments used by the "txonly" processing engine.
+ */
+#define TXONLY_DEF_PACKET_LEN 64
+extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */
+extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
+extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */
+
/* Policy for splitting generated TX packets into multiple segments. */
enum tx_pkt_split {
	TX_PKT_SPLIT_OFF, /**< no segment split (single-segment packets) */
	TX_PKT_SPLIT_ON,  /**< always split into tx_pkt_nb_segs segments */
	TX_PKT_SPLIT_RND, /**< random number of segments, 1..tx_pkt_nb_segs */
};
+
+extern enum tx_pkt_split tx_pkt_split;
+
+extern uint16_t nb_pkt_per_burst;
+extern uint16_t mb_mempool_cache;
+extern int8_t rx_pthresh;
+extern int8_t rx_hthresh;
+extern int8_t rx_wthresh;
+extern int8_t tx_pthresh;
+extern int8_t tx_hthresh;
+extern int8_t tx_wthresh;
+
+extern struct fwd_config cur_fwd_config;
+extern struct fwd_engine *cur_fwd_eng;
+extern struct fwd_lcore **fwd_lcores;
+extern struct fwd_stream **fwd_streams;
+
+extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
+extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+
+extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
+extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */
+
+static inline unsigned int
+lcore_num(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; ++i)
+ if (fwd_lcores_cpuids[i] == rte_lcore_id())
+ return i;
+
+ rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+}
+
/* Return the fwd_lcore descriptor of the logical core running this thread. */
static inline struct fwd_lcore *
current_fwd_lcore(void)
{
	return fwd_lcores[lcore_num()];
}
+
+/* Mbuf Pools */
/*
 * Format the canonical name of the mbuf pool of CPU socket "sock_id"
 * ("mbuf_pool_socket_<id>") into the mp_name buffer of name_size bytes.
 */
static inline void
mbuf_poolname_build(unsigned int sock_id, char* mp_name, int name_size)
{
	/* snprintf bounds the write and NUL-terminates (for name_size > 0). */
	snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id);
}
+
+static inline struct rte_mempool *
+mbuf_pool_find(unsigned int sock_id)
+{
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name));
+ return rte_mempool_lookup((const char *)pool_name);
+}
+
/**
 * Read/Write operations on a PCI register of a port.
 */
/*
 * Read the 32-bit register at byte offset "reg_off" of the port's first
 * PCI memory resource (BAR 0) and convert it from little-endian device
 * order to CPU order.  The volatile access forces a real MMIO read.
 */
static inline uint32_t
port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
{
	void *reg_addr;
	uint32_t reg_v;

	reg_addr = (void *)
		((char *)port->dev_info.pci_dev->mem_resource[0].addr +
			reg_off);
	reg_v = *((volatile uint32_t *)reg_addr);
	return rte_le_to_cpu_32(reg_v);
}
+
+#define port_id_pci_reg_read(pt_id, reg_off) \
+ port_pci_reg_read(&ports[(pt_id)], (reg_off))
+
/*
 * Write "reg_v" (CPU order) to the 32-bit register at byte offset
 * "reg_off" of the port's first PCI memory resource (BAR 0), converting
 * to the device's little-endian order.  The volatile store forces a real
 * MMIO write.
 */
static inline void
port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v)
{
	void *reg_addr;

	reg_addr = (void *)
		((char *)port->dev_info.pci_dev->mem_resource[0].addr +
			reg_off);
	*((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v);
}
+
+#define port_id_pci_reg_write(pt_id, reg_off, reg_value) \
+ port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value))
+
+/* Prototypes */
+unsigned int parse_item_list(char* str, const char* item_name,
+ unsigned int max_items,
+ unsigned int *parsed_items, int check_unique_values);
+void launch_args_parse(int argc, char** argv);
+void prompt(void);
+void prompt_exit(void);
+void nic_stats_display(portid_t port_id);
+void nic_stats_clear(portid_t port_id);
+void nic_xstats_display(portid_t port_id);
+void nic_xstats_clear(portid_t port_id);
+void nic_stats_mapping_display(portid_t port_id);
+void port_infos_display(portid_t port_id);
+void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
+void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
+void fwd_lcores_config_display(void);
+void fwd_config_display(void);
+void rxtx_config_display(void);
+void fwd_config_setup(void);
+void set_def_fwd_config(void);
+void reconfig(portid_t new_port_id, unsigned socket_id);
+int init_fwd_streams(void);
+
+void port_mtu_set(portid_t port_id, uint16_t mtu);
+void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos);
+void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
+ uint8_t bit_v);
+void port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
+ uint8_t bit1_pos, uint8_t bit2_pos);
+void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
+ uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value);
+void port_reg_display(portid_t port_id, uint32_t reg_off);
+void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value);
+
+void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id);
+void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id);
+
+int set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc);
+int set_fwd_lcores_mask(uint64_t lcoremask);
+void set_fwd_lcores_number(uint16_t nb_lc);
+
+void set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt);
+void set_fwd_ports_mask(uint64_t portmask);
+void set_fwd_ports_number(uint16_t nb_pt);
+
+void rx_vlan_strip_set(portid_t port_id, int on);
+void rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on);
+
+void rx_vlan_filter_set(portid_t port_id, int on);
+void rx_vlan_all_filter_set(portid_t port_id, int on);
+int rx_vft_set(portid_t port_id, uint16_t vlan_id, int on);
+void vlan_extend_set(portid_t port_id, int on);
+void vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type,
+ uint16_t tp_id);
+void tx_vlan_set(portid_t port_id, uint16_t vlan_id);
+void tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer);
+void tx_vlan_reset(portid_t port_id);
+void tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on);
+
+void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value);
+
+void set_verbose_level(uint16_t vb_level);
+void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs);
+void show_tx_pkt_segments(void);
+void set_tx_pkt_split(const char *name);
+void set_nb_pkt_per_burst(uint16_t pkt_burst);
+char *list_pkt_forwarding_modes(void);
+void set_pkt_forwarding_mode(const char *fwd_mode);
+void start_packet_forwarding(int with_tx_first);
+void stop_packet_forwarding(void);
+void dev_set_link_up(portid_t pid);
+void dev_set_link_down(portid_t pid);
+void init_port_config(void);
+void set_port_slave_flag(portid_t slave_pid);
+void clear_port_slave_flag(portid_t slave_pid);
+int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
+ enum rte_eth_nb_tcs num_tcs,
+ uint8_t pfc_en);
+int start_port(portid_t pid);
+void stop_port(portid_t pid);
+void close_port(portid_t pid);
+void attach_port(char *identifier);
+void detach_port(uint8_t port_id);
+int all_ports_stopped(void);
+int port_is_started(portid_t port_id);
+void pmd_test_exit(void);
+void fdir_get_infos(portid_t port_id);
+void fdir_set_flex_mask(portid_t port_id,
+ struct rte_eth_fdir_flex_mask *cfg);
+void fdir_set_flex_payload(portid_t port_id,
+ struct rte_eth_flex_payload_cfg *cfg);
+void port_rss_reta_info(portid_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t nb_entries);
+
+void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on);
+void set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on);
+
+int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate);
+int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate,
+ uint64_t q_msk);
+
+void port_rss_hash_conf_show(portid_t port_id, char rss_info[],
+ int show_rss_key);
+void port_rss_hash_key_update(portid_t port_id, char rss_type[],
+ uint8_t *hash_key, uint hash_key_len);
+void get_syn_filter(uint8_t port_id);
+void get_ethertype_filter(uint8_t port_id, uint16_t index);
+void get_2tuple_filter(uint8_t port_id, uint16_t index);
+void get_5tuple_filter(uint8_t port_id, uint16_t index);
+int rx_queue_id_is_invalid(queueid_t rxq_id);
+int tx_queue_id_is_invalid(queueid_t txq_id);
+
+/* Functions to manage the set of filtered Multicast MAC addresses */
+void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr);
+void mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr);
+void port_dcb_info_display(uint8_t port_id);
+
/* Controls whether port_id_is_invalid() prints a warning on bad ids. */
enum print_warning {
	ENABLED_WARN = 0, /**< print a message for an invalid port id */
	DISABLED_WARN     /**< validate silently */
};
+int port_id_is_invalid(portid_t port_id, enum print_warning warning);
+
/*
 * Work-around of a compilation error with ICC on invocations of the
 * rte_be_to_cpu_16() function.
 * NOTE(review): "__GCC__" is not a macro GCC predefines (GCC defines
 * "__GNUC__"), so the first branch appears dead and the byte-order
 * fallback below is what actually gets compiled — TODO confirm intent.
 */
#ifdef __GCC__
#define RTE_BE_TO_CPU_16(be_16_v)  rte_be_to_cpu_16((be_16_v))
#define RTE_CPU_TO_BE_16(cpu_16_v) rte_cpu_to_be_16((cpu_16_v))
#else
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
/* Big-endian host: network order already matches CPU order. */
#define RTE_BE_TO_CPU_16(be_16_v)  (be_16_v)
#define RTE_CPU_TO_BE_16(cpu_16_v) (cpu_16_v)
#else
/* Little-endian host: swap the two bytes of the 16-bit value. */
#define RTE_BE_TO_CPU_16(be_16_v) \
	(uint16_t) ((((be_16_v) & 0xFF) << 8) | ((be_16_v) >> 8))
#define RTE_CPU_TO_BE_16(cpu_16_v) \
	(uint16_t) ((((cpu_16_v) & 0xFF) << 8) | ((cpu_16_v) >> 8))
#endif
#endif /* __GCC__ */
+
+#endif /* _TESTPMD_H_ */
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
new file mode 100644
index 00000000..b37cae57
--- /dev/null
+++ b/app/test-pmd/txonly.c
@@ -0,0 +1,327 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+#define UDP_SRC_PORT 1024
+#define UDP_DST_PORT 1024
+
+#define IP_SRC_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 1)
+#define IP_DST_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 2)
+
+#define IP_DEFTTL 64 /* from RFC 1340. */
+#define IP_VERSION 0x40
+#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
+#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
+
+static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
+
/*
 * Allocate a raw mbuf from pool "mp" on the TX fast path.
 * Uses the internal raw-alloc API: the returned mbuf's data fields are
 * NOT reset, so the caller must initialize every field it relies on.
 * Returns NULL when the pool is exhausted.
 */
static inline struct rte_mbuf *
tx_mbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	/* Sanity checks are compiled in only with RTE_LIBRTE_MBUF_DEBUG. */
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}
+
+static void
+copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
+ unsigned offset)
+{
+ struct rte_mbuf *seg;
+ void *seg_buf;
+ unsigned copy_len;
+
+ seg = pkt;
+ while (offset >= seg->data_len) {
+ offset -= seg->data_len;
+ seg = seg->next;
+ }
+ copy_len = seg->data_len - offset;
+ seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
+ while (len > copy_len) {
+ rte_memcpy(seg_buf, buf, (size_t) copy_len);
+ len -= copy_len;
+ buf = ((char*) buf + copy_len);
+ seg = seg->next;
+ seg_buf = rte_pktmbuf_mtod(seg, char *);
+ }
+ rte_memcpy(seg_buf, buf, (size_t) len);
+}
+
+static inline void
+copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
+{
+ if (offset + len <= pkt->data_len) {
+ rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
+ buf, (size_t) len);
+ return;
+ }
+ copy_buf_to_pkt_segs(buf, len, pkt, offset);
+}
+
/*
 * Fill in the constant IPv4 and UDP headers that are prepended to every
 * TXONLY packet.  "pkt_data_len" is the UDP payload size; the UDP datagram
 * length and the IP total length are derived from it.  The IP header
 * checksum is precomputed here since the headers never change at runtime.
 */
static void
setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
			 struct udp_hdr *udp_hdr,
			 uint16_t pkt_data_len)
{
	uint16_t *ptr16;
	uint32_t ip_cksum;
	uint16_t pkt_len;

	/*
	 * Initialize UDP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
	udp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT);
	udp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT);
	udp_hdr->dgram_len      = RTE_CPU_TO_BE_16(pkt_len);
	udp_hdr->dgram_cksum    = 0; /* No UDP checksum. */

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
	ip_hdr->version_ihl   = IP_VHL_DEF;
	ip_hdr->type_of_service   = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live   = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_UDP;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length   = RTE_CPU_TO_BE_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR);
	ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR);

	/*
	 * Compute IP header checksum: sum the header as ten 16-bit words,
	 * skipping word 5 (the checksum field itself, left as zero here).
	 */
	ptr16 = (unaligned_uint16_t*) ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 * (Subtracting 65535 implements the end-around carry fold.)
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	if (ip_cksum > 65535)
		ip_cksum -= 65535;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
}
+
/*
 * Transmit a burst of multi-segments packets.
 *
 * Builds up to nb_pkt_per_burst synthetic Ethernet/IPv4/UDP packets from
 * the current lcore's mbuf pool, copies the precomputed headers into the
 * first segment(s), and sends them on the stream's TX queue.  Packets the
 * driver refuses are freed and counted as dropped.
 */
static void
pkt_burst_transmit(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *pkt;
	struct rte_mbuf *pkt_seg;
	struct rte_mempool *mbp;
	struct ether_hdr eth_hdr;
	uint16_t nb_tx;
	uint16_t nb_pkt;
	uint16_t vlan_tci, vlan_tci_outer;
	uint64_t ol_flags = 0;
	uint8_t  i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif
	uint32_t nb_segs, pkt_len;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	mbp = current_fwd_lcore()->mbp;
	txp = &ports[fs->tx_port];
	/* Pick up the TX port's VLAN/QinQ insertion configuration once. */
	vlan_tci = txp->tx_vlan_id;
	vlan_tci_outer = txp->tx_vlan_id_outer;
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
		ol_flags = PKT_TX_VLAN_PKT;
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
		ol_flags |= PKT_TX_QINQ_PKT;
	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = tx_mbuf_alloc(mbp);
		if (pkt == NULL) {
		nomore_mbuf:
			/* Pool exhausted: send what was built so far. */
			if (nb_pkt == 0)
				return;
			break;
		}
		pkt->data_len = tx_pkt_seg_lengths[0];
		pkt_seg = pkt;
		/* TX_PKT_SPLIT_RND picks 1..tx_pkt_nb_segs segments at random. */
		if (tx_pkt_split == TX_PKT_SPLIT_RND)
			nb_segs = random() % tx_pkt_nb_segs + 1;
		else
			nb_segs = tx_pkt_nb_segs;
		pkt_len = pkt->data_len;
		/* Chain the remaining segments onto the head mbuf. */
		for (i = 1; i < nb_segs; i++) {
			pkt_seg->next = tx_mbuf_alloc(mbp);
			if (pkt_seg->next == NULL) {
				/* Free the partial chain before bailing out. */
				pkt->nb_segs = i;
				rte_pktmbuf_free(pkt);
				goto nomore_mbuf;
			}
			pkt_seg = pkt_seg->next;
			pkt_seg->data_len = tx_pkt_seg_lengths[i];
			pkt_len += pkt_seg->data_len;
		}
		pkt_seg->next = NULL; /* Last segment of packet. */

		/*
		 * Initialize Ethernet header.
		 */
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],&eth_hdr.d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
		eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

		/*
		 * Copy headers in first packet segment(s).
		 */
		copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
		copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
				sizeof(struct ether_hdr));
		copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
				sizeof(struct ether_hdr) +
				sizeof(struct ipv4_hdr));

		/*
		 * Complete first mbuf of packet and append it to the
		 * burst of packets to be transmitted.
		 */
		pkt->nb_segs = nb_segs;
		pkt->pkt_len = pkt_len;
		pkt->ol_flags = ol_flags;
		pkt->vlan_tci = vlan_tci;
		pkt->vlan_tci_outer = vlan_tci_outer;
		pkt->l2_len = sizeof(struct ether_hdr);
		pkt->l3_len = sizeof(struct ipv4_hdr);
		pkts_burst[nb_pkt] = pkt;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
	fs->tx_packets += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	/* Free the packets the driver did not accept and count the drops. */
	if (unlikely(nb_tx < nb_pkt)) {
		if (verbose_level > 0 && fs->fwd_dropped == 0)
			printf("port %d tx_queue %d - drop "
			       "(nb_pkt:%u - nb_tx:%u)=%u packets\n",
			       fs->tx_port, fs->tx_queue,
			       (unsigned) nb_pkt, (unsigned) nb_tx,
			       (unsigned) (nb_pkt - nb_tx));
		fs->fwd_dropped += (nb_pkt - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_pkt);
	}

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
+
+static void
+tx_only_begin(__attribute__((unused)) portid_t pi)
+{
+ uint16_t pkt_data_len;
+
+ pkt_data_len = (uint16_t) (tx_pkt_length - (sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ sizeof(struct udp_hdr)));
+ setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+}
+
/* Registration record of the "txonly" forwarding mode. */
struct fwd_engine tx_only_engine = {
	.fwd_mode_name  = "txonly",
	.port_fwd_begin = tx_only_begin,
	.port_fwd_end   = NULL, /* no per-port teardown needed */
	.packet_fwd     = pkt_burst_transmit,
};