author    C.J. Collier <cjcollier@linuxfoundation.org>  2016-06-14 07:50:17 -0700
committer C.J. Collier <cjcollier@linuxfoundation.org>  2016-06-14 12:17:54 -0700
commit    97f17497d162afdb82c8704bf097f0fee3724b2e (patch)
tree      1c6269614c0c15ffef8451c58ae8f8b30a1bc804 /examples/ip_pipeline
parent    e04be89c2409570e0055b2cda60bd11395bb93b0 (diff)

Imported Upstream version 16.04

Change-Id: I77eadcd8538a9122e4773cbe55b24033dc451757
Signed-off-by: C.J. Collier <cjcollier@linuxfoundation.org>
Diffstat (limited to 'examples/ip_pipeline')
-rw-r--r--  examples/ip_pipeline/Makefile | 79
-rw-r--r--  examples/ip_pipeline/app.h | 949
-rw-r--r--  examples/ip_pipeline/config/edge_router_downstream.cfg | 85
-rw-r--r--  examples/ip_pipeline/config/edge_router_downstream.sh | 10
-rw-r--r--  examples/ip_pipeline/config/edge_router_upstream.cfg | 110
-rw-r--r--  examples/ip_pipeline/config/edge_router_upstream.sh | 38
-rw-r--r--  examples/ip_pipeline/config/ip_pipeline.cfg | 9
-rw-r--r--  examples/ip_pipeline/config/ip_pipeline.sh | 5
-rw-r--r--  examples/ip_pipeline/config/l2fwd.cfg | 55
-rw-r--r--  examples/ip_pipeline/config/l3fwd.cfg | 63
-rw-r--r--  examples/ip_pipeline/config/l3fwd.sh | 9
-rw-r--r--  examples/ip_pipeline/config/tm_profile.cfg | 105
-rw-r--r--  examples/ip_pipeline/config_check.c | 444
-rw-r--r--  examples/ip_pipeline/config_parse.c | 3383
-rw-r--r--  examples/ip_pipeline/config_parse_tm.c | 448
-rw-r--r--  examples/ip_pipeline/cpu_core_map.c | 492
-rw-r--r--  examples/ip_pipeline/cpu_core_map.h | 69
-rw-r--r--  examples/ip_pipeline/init.c | 1637
-rw-r--r--  examples/ip_pipeline/main.c | 64
-rw-r--r--  examples/ip_pipeline/parser.h | 50
-rw-r--r--  examples/ip_pipeline/pipeline.h | 93
-rw-r--r--  examples/ip_pipeline/pipeline/hash_func.h | 351
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_actions_common.h | 231
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_common_be.c | 206
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_common_be.h | 163
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_common_fe.c | 1310
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_common_fe.h | 234
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_firewall.c | 1869
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_firewall.h | 77
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_firewall_be.c | 907
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_firewall_be.h | 176
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_actions.c | 1814
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_actions.h | 78
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c | 1011
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h | 168
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_classification.c | 2215
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_classification.h | 107
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c | 811
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h | 142
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_master.c | 47
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_master.h | 41
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_master_be.c | 150
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_master_be.h | 41
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_passthrough.c | 47
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_passthrough.h | 41
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_passthrough_be.c | 804
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_passthrough_be.h | 59
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_routing.c | 2239
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_routing.h | 93
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_routing_be.c | 1970
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_routing_be.h | 296
-rw-r--r--  examples/ip_pipeline/pipeline_be.h | 305
-rw-r--r--  examples/ip_pipeline/thread.c | 322
-rw-r--r--  examples/ip_pipeline/thread.h | 98
-rw-r--r--  examples/ip_pipeline/thread_fe.c | 461
-rw-r--r--  examples/ip_pipeline/thread_fe.h | 101
56 files changed, 27182 insertions, 0 deletions
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
new file mode 100644
index 00000000..10fe1ba9
--- /dev/null
+++ b/examples/ip_pipeline/Makefile
@@ -0,0 +1,79 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = ip_pipeline
+
+VPATH += $(SRCDIR)/pipeline
+
+INC += $(wildcard *.h) $(wildcard pipeline/*.h)
+
+# all sources are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_check.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread_fe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cpu_core_map.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_fe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_passthrough_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_passthrough.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_firewall_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_firewall.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_classification_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_classification.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_actions_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_actions.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_routing_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_routing.c
+
+CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/pipeline
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -Wno-error=unused-function -Wno-error=unused-variable
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
new file mode 100644
index 00000000..55a98417
--- /dev/null
+++ b/examples/ip_pipeline/app.h
@@ -0,0 +1,949 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_APP_H__
+#define __INCLUDE_APP_H__
+
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_ring.h>
+#include <rte_sched.h>
+#include <cmdline_parse.h>
+
+#include <rte_ethdev.h>
+
+#include "cpu_core_map.h"
+#include "pipeline.h"
+
+#define APP_PARAM_NAME_SIZE PIPELINE_NAME_SIZE
+#define APP_LINK_PCI_BDF_SIZE 16
+struct app_mempool_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t buffer_size;
+ uint32_t pool_size;
+ uint32_t cache_size;
+ uint32_t cpu_socket_id;
+};
+
+struct app_link_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t pmd_id; /* Generated based on port mask */
+ uint32_t arp_q; /* 0 = Disabled (packets go to default queue 0) */
+ uint32_t tcp_syn_q; /* 0 = Disabled (pkts go to default queue) */
+ uint32_t ip_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t tcp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t udp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t sctp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t state; /* DOWN = 0, UP = 1 */
+ uint32_t ip; /* 0 = Invalid */
+ uint32_t depth; /* Valid only when IP is valid */
+ uint64_t mac_addr; /* Read from HW */
+ char pci_bdf[APP_LINK_PCI_BDF_SIZE];
+
+ struct rte_eth_conf conf;
+ uint8_t promisc;
+};
+
+struct app_pktq_hwq_in_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t mempool_id; /* Position in the app->mempool_params */
+ uint32_t size;
+ uint32_t burst;
+
+ struct rte_eth_rxconf conf;
+};
+
+struct app_pktq_hwq_out_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t burst;
+ uint32_t dropless;
+ uint64_t n_retries;
+ struct rte_eth_txconf conf;
+};
+
+struct app_pktq_swq_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t burst_read;
+ uint32_t burst_write;
+ uint32_t dropless;
+ uint64_t n_retries;
+ uint32_t cpu_socket_id;
+ uint32_t ipv4_frag;
+ uint32_t ipv6_frag;
+ uint32_t ipv4_ras;
+ uint32_t ipv6_ras;
+ uint32_t mtu;
+ uint32_t metadata_size;
+ uint32_t mempool_direct_id;
+ uint32_t mempool_indirect_id;
+};
+
+#ifndef APP_FILE_NAME_SIZE
+#define APP_FILE_NAME_SIZE 256
+#endif
+
+#ifndef APP_MAX_SCHED_SUBPORTS
+#define APP_MAX_SCHED_SUBPORTS 8
+#endif
+
+#ifndef APP_MAX_SCHED_PIPES
+#define APP_MAX_SCHED_PIPES 4096
+#endif
+
+struct app_pktq_tm_params {
+ char *name;
+ uint32_t parsed;
+ const char *file_name;
+ struct rte_sched_port_params sched_port_params;
+ struct rte_sched_subport_params
+ sched_subport_params[APP_MAX_SCHED_SUBPORTS];
+ struct rte_sched_pipe_params
+ sched_pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ int sched_pipe_to_profile[APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES];
+ uint32_t burst_read;
+ uint32_t burst_write;
+};
+
+struct app_pktq_source_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t mempool_id; /* Position in the app->mempool_params array */
+ uint32_t burst;
+ char *file_name; /* Full path of PCAP file to be copied to mbufs */
+ uint32_t n_bytes_per_pkt;
+};
+
+struct app_pktq_sink_params {
+ char *name;
+ uint8_t parsed;
+ char *file_name; /* Full path of PCAP file to be copied to mbufs */
+ uint32_t n_pkts_to_dump;
+};
+
+struct app_msgq_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t cpu_socket_id;
+};
+
+enum app_pktq_in_type {
+ APP_PKTQ_IN_HWQ,
+ APP_PKTQ_IN_SWQ,
+ APP_PKTQ_IN_TM,
+ APP_PKTQ_IN_SOURCE,
+};
+
+struct app_pktq_in_params {
+ enum app_pktq_in_type type;
+ uint32_t id; /* Position in the appropriate app array */
+};
+
+enum app_pktq_out_type {
+ APP_PKTQ_OUT_HWQ,
+ APP_PKTQ_OUT_SWQ,
+ APP_PKTQ_OUT_TM,
+ APP_PKTQ_OUT_SINK,
+};
+
+struct app_pktq_out_params {
+ enum app_pktq_out_type type;
+ uint32_t id; /* Position in the appropriate app array */
+};
+
+#ifndef APP_PIPELINE_TYPE_SIZE
+#define APP_PIPELINE_TYPE_SIZE 64
+#endif
+
+#define APP_MAX_PIPELINE_PKTQ_IN PIPELINE_MAX_PORT_IN
+#define APP_MAX_PIPELINE_PKTQ_OUT PIPELINE_MAX_PORT_OUT
+#define APP_MAX_PIPELINE_MSGQ_IN PIPELINE_MAX_MSGQ_IN
+#define APP_MAX_PIPELINE_MSGQ_OUT PIPELINE_MAX_MSGQ_OUT
+
+#define APP_MAX_PIPELINE_ARGS PIPELINE_MAX_ARGS
+
+struct app_pipeline_params {
+ char *name;
+ uint8_t parsed;
+
+ char type[APP_PIPELINE_TYPE_SIZE];
+
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t hyper_th_id;
+
+ struct app_pktq_in_params pktq_in[APP_MAX_PIPELINE_PKTQ_IN];
+ struct app_pktq_out_params pktq_out[APP_MAX_PIPELINE_PKTQ_OUT];
+ uint32_t msgq_in[APP_MAX_PIPELINE_MSGQ_IN];
+ uint32_t msgq_out[APP_MAX_PIPELINE_MSGQ_OUT];
+
+ uint32_t n_pktq_in;
+ uint32_t n_pktq_out;
+ uint32_t n_msgq_in;
+ uint32_t n_msgq_out;
+
+ uint32_t timer_period;
+
+ char *args_name[APP_MAX_PIPELINE_ARGS];
+ char *args_value[APP_MAX_PIPELINE_ARGS];
+ uint32_t n_args;
+};
+
+struct app_pipeline_data {
+ void *be;
+ void *fe;
+ struct pipeline_type *ptype;
+ uint64_t timer_period;
+ uint32_t enabled;
+};
+
+struct app_thread_pipeline_data {
+ uint32_t pipeline_id;
+ void *be;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ uint64_t timer_period;
+ uint64_t deadline;
+};
+
+#ifndef APP_MAX_THREAD_PIPELINES
+#define APP_MAX_THREAD_PIPELINES 16
+#endif
+
+#ifndef APP_THREAD_TIMER_PERIOD
+#define APP_THREAD_TIMER_PERIOD 1
+#endif
+
+struct app_thread_data {
+ struct app_thread_pipeline_data regular[APP_MAX_THREAD_PIPELINES];
+ struct app_thread_pipeline_data custom[APP_MAX_THREAD_PIPELINES];
+
+ uint32_t n_regular;
+ uint32_t n_custom;
+
+ uint64_t timer_period;
+ uint64_t thread_req_deadline;
+
+ uint64_t deadline;
+
+ struct rte_ring *msgq_in;
+ struct rte_ring *msgq_out;
+
+ uint64_t headroom_time;
+ uint64_t headroom_cycles;
+ double headroom_ratio;
+};
+
+#ifndef APP_MAX_LINKS
+#define APP_MAX_LINKS 16
+#endif
+
+struct app_eal_params {
+ /* Map lcore set to physical cpu set */
+ char *coremap;
+
+ /* Core ID that is used as master */
+ uint32_t master_lcore_present;
+ uint32_t master_lcore;
+
+ /* Number of memory channels */
+ uint32_t channels_present;
+ uint32_t channels;
+
+ /* Memory to allocate (see also --socket-mem) */
+ uint32_t memory_present;
+ uint32_t memory;
+
+ /* Force number of memory ranks (don't detect) */
+ uint32_t ranks_present;
+ uint32_t ranks;
+
+ /* Add a PCI device in black list. */
+ char *pci_blacklist[APP_MAX_LINKS];
+
+ /* Add a PCI device in white list. */
+ char *pci_whitelist[APP_MAX_LINKS];
+
+ /* Add a virtual device. */
+ char *vdev[APP_MAX_LINKS];
+
+ /* Use VMware TSC map instead of native RDTSC */
+ uint32_t vmware_tsc_map_present;
+ int vmware_tsc_map;
+
+ /* Type of this process (primary|secondary|auto) */
+ char *proc_type;
+
+ /* Set syslog facility */
+ char *syslog;
+
+ /* Set default log level */
+ uint32_t log_level_present;
+ uint32_t log_level;
+
+ /* Display version information on startup */
+ uint32_t version_present;
+ int version;
+
+ /* This help */
+ uint32_t help_present;
+ int help;
+
+ /* Use malloc instead of hugetlbfs */
+ uint32_t no_huge_present;
+ int no_huge;
+
+ /* Disable PCI */
+ uint32_t no_pci_present;
+ int no_pci;
+
+ /* Disable HPET */
+ uint32_t no_hpet_present;
+ int no_hpet;
+
+ /* No shared config (mmap'd files) */
+ uint32_t no_shconf_present;
+ int no_shconf;
+
+ /* Add driver */
+ char *add_driver;
+
+ /* Memory to allocate on sockets (comma separated values)*/
+ char *socket_mem;
+
+ /* Directory where hugetlbfs is mounted */
+ char *huge_dir;
+
+ /* Prefix for hugepage filenames */
+ char *file_prefix;
+
+ /* Base virtual address */
+ char *base_virtaddr;
+
+ /* Create /dev/uioX (usually done by hotplug) */
+ uint32_t create_uio_dev_present;
+ int create_uio_dev;
+
+ /* Interrupt mode for VFIO (legacy|msi|msix) */
+ char *vfio_intr;
+
+ /* Support running on Xen dom0 without hugetlbfs */
+ uint32_t xen_dom0_present;
+ int xen_dom0;
+};
+
+#ifndef APP_APPNAME_SIZE
+#define APP_APPNAME_SIZE 256
+#endif
+
+#ifndef APP_MAX_MEMPOOLS
+#define APP_MAX_MEMPOOLS 8
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_IN
+#define APP_LINK_MAX_HWQ_IN 64
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_OUT
+#define APP_LINK_MAX_HWQ_OUT 64
+#endif
+
+#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
+
+#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
+
+#ifndef APP_MAX_PKTQ_SWQ
+#define APP_MAX_PKTQ_SWQ 256
+#endif
+
+#define APP_MAX_PKTQ_TM APP_MAX_LINKS
+
+#ifndef APP_MAX_PKTQ_SOURCE
+#define APP_MAX_PKTQ_SOURCE 16
+#endif
+
+#ifndef APP_MAX_PKTQ_SINK
+#define APP_MAX_PKTQ_SINK 16
+#endif
+
+#ifndef APP_MAX_MSGQ
+#define APP_MAX_MSGQ 64
+#endif
+
+#ifndef APP_MAX_PIPELINES
+#define APP_MAX_PIPELINES 64
+#endif
+
+#ifndef APP_EAL_ARGC
+#define APP_EAL_ARGC 32
+#endif
+
+#ifndef APP_MAX_PIPELINE_TYPES
+#define APP_MAX_PIPELINE_TYPES 64
+#endif
+
+#ifndef APP_MAX_THREADS
+#define APP_MAX_THREADS RTE_MAX_LCORE
+#endif
+
+#ifndef APP_MAX_CMDS
+#define APP_MAX_CMDS 64
+#endif
+
+#ifndef APP_THREAD_HEADROOM_STATS_COLLECT
+#define APP_THREAD_HEADROOM_STATS_COLLECT 1
+#endif
+
+struct app_params {
+ /* Config */
+ char app_name[APP_APPNAME_SIZE];
+ const char *config_file;
+ const char *script_file;
+ const char *parser_file;
+ const char *output_file;
+ const char *preproc;
+ const char *preproc_args;
+ uint64_t port_mask;
+ uint32_t log_level;
+
+ struct app_eal_params eal_params;
+ struct app_mempool_params mempool_params[APP_MAX_MEMPOOLS];
+ struct app_link_params link_params[APP_MAX_LINKS];
+ struct app_pktq_hwq_in_params hwq_in_params[APP_MAX_HWQ_IN];
+ struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT];
+ struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ];
+ struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM];
+ struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE];
+ struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK];
+ struct app_msgq_params msgq_params[APP_MAX_MSGQ];
+ struct app_pipeline_params pipeline_params[APP_MAX_PIPELINES];
+
+ uint32_t n_mempools;
+ uint32_t n_links;
+ uint32_t n_pktq_hwq_in;
+ uint32_t n_pktq_hwq_out;
+ uint32_t n_pktq_swq;
+ uint32_t n_pktq_tm;
+ uint32_t n_pktq_source;
+ uint32_t n_pktq_sink;
+ uint32_t n_msgq;
+ uint32_t n_pipelines;
+
+ /* Init */
+ char *eal_argv[1 + APP_EAL_ARGC];
+ struct cpu_core_map *core_map;
+ uint64_t core_mask;
+ struct rte_mempool *mempool[APP_MAX_MEMPOOLS];
+ struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
+ struct rte_sched_port *tm[APP_MAX_PKTQ_TM];
+ struct rte_ring *msgq[APP_MAX_MSGQ];
+ struct pipeline_type pipeline_type[APP_MAX_PIPELINE_TYPES];
+ struct app_pipeline_data pipeline_data[APP_MAX_PIPELINES];
+ struct app_thread_data thread_data[APP_MAX_THREADS];
+ cmdline_parse_ctx_t cmds[APP_MAX_CMDS + 1];
+
+ int eal_argc;
+ uint32_t n_pipeline_types;
+ uint32_t n_cmds;
+};
+
+#define APP_PARAM_VALID(obj) ((obj)->name != NULL)
+
+#define APP_PARAM_COUNT(obj_array, n_objs) \
+{ \
+ size_t i; \
+ \
+ n_objs = 0; \
+ for (i = 0; i < RTE_DIM(obj_array); i++) \
+ if (APP_PARAM_VALID(&((obj_array)[i]))) \
+ n_objs++; \
+}
+
+#define APP_PARAM_FIND(obj_array, key) \
+({ \
+ ssize_t obj_idx; \
+ const ssize_t obj_count = RTE_DIM(obj_array); \
+ \
+ for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
+ if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
+ continue; \
+ \
+ if (strcmp(key, (obj_array)[obj_idx].name) == 0) \
+ break; \
+ } \
+ obj_idx < obj_count ? obj_idx : -ENOENT; \
+})
+
+#define APP_PARAM_FIND_BY_ID(obj_array, prefix, id, obj) \
+do { \
+ char name[APP_PARAM_NAME_SIZE]; \
+ ssize_t pos; \
+ \
+ sprintf(name, prefix "%" PRIu32, id); \
+ pos = APP_PARAM_FIND(obj_array, name); \
+ obj = (pos < 0) ? NULL : &((obj_array)[pos]); \
+} while (0)
+
+#define APP_PARAM_GET_ID(obj, prefix, id) \
+do \
+ sscanf(obj->name, prefix "%" SCNu32, &id); \
+while (0) \
+
+#define APP_PARAM_ADD(obj_array, obj_name) \
+({ \
+ ssize_t obj_idx; \
+ const ssize_t obj_count = RTE_DIM(obj_array); \
+ \
+ obj_idx = APP_PARAM_FIND(obj_array, obj_name); \
+ if (obj_idx < 0) { \
+ for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
+ if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
+ break; \
+ } \
+ \
+ if (obj_idx < obj_count) { \
+ (obj_array)[obj_idx].name = strdup(obj_name); \
+ if ((obj_array)[obj_idx].name == NULL) \
+ obj_idx = -EINVAL; \
+ } else \
+ obj_idx = -ENOMEM; \
+ } \
+ obj_idx; \
+})
+
+#define APP_CHECK(exp, fmt, ...) \
+do { \
+ if (!(exp)) { \
+ fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
+ abort(); \
+ } \
+} while (0)
+
+enum app_log_level {
+ APP_LOG_LEVEL_HIGH = 1,
+ APP_LOG_LEVEL_LOW,
+ APP_LOG_LEVELS
+};
+
+#define APP_LOG(app, level, fmt, ...) \
+do { \
+ if (app->log_level >= APP_LOG_LEVEL_ ## level) \
+ fprintf(stdout, "[APP] " fmt "\n", ## __VA_ARGS__); \
+} while (0)
+
+static inline uint32_t
+app_link_get_n_rxq(struct app_params *app, struct app_link_params *link)
+{
+ uint32_t n_rxq = 0, link_id, i;
+ uint32_t n_pktq_hwq_in = RTE_MIN(app->n_pktq_hwq_in,
+ RTE_DIM(app->hwq_in_params));
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ for (i = 0; i < n_pktq_hwq_in; i++) {
+ struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id, &rxq_queue_id);
+ if (rxq_link_id == link_id)
+ n_rxq++;
+ }
+
+ return n_rxq;
+}
+
+static inline uint32_t
+app_link_get_n_txq(struct app_params *app, struct app_link_params *link)
+{
+ uint32_t n_txq = 0, link_id, i;
+ uint32_t n_pktq_hwq_out = RTE_MIN(app->n_pktq_hwq_out,
+ RTE_DIM(app->hwq_out_params));
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ for (i = 0; i < n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id, &txq_queue_id);
+ if (txq_link_id == link_id)
+ n_txq++;
+ }
+
+ return n_txq;
+}
+
+static inline uint32_t
+app_rxq_get_readers(struct app_params *app, struct app_pktq_hwq_in_params *rxq)
+{
+ uint32_t pos = rxq - app->hwq_in_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_HWQ) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_swq_get_readers(struct app_params *app, struct app_pktq_swq_params *swq)
+{
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SWQ) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
+{
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TM) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_source_get_readers(struct app_params *app,
+struct app_pktq_source_params *source)
+{
+ uint32_t pos = source - app->source_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SOURCE) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_msgq_get_readers(struct app_params *app, struct app_msgq_params *msgq)
+{
+ uint32_t pos = msgq - app->msgq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_msgq_in = RTE_MIN(p->n_msgq_in, RTE_DIM(p->msgq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_msgq_in; j++)
+ if (p->msgq_in[j] == pos)
+ n_readers++;
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_txq_get_writers(struct app_params *app, struct app_pktq_hwq_out_params *txq)
+{
+ uint32_t pos = txq - app->hwq_out_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_HWQ) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_swq_get_writers(struct app_params *app, struct app_pktq_swq_params *swq)
+{
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
+{
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TM) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_sink_get_writers(struct app_params *app, struct app_pktq_sink_params *sink)
+{
+ uint32_t pos = sink - app->sink_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SINK) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_msgq_get_writers(struct app_params *app, struct app_msgq_params *msgq)
+{
+ uint32_t pos = msgq - app->msgq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_msgq_out = RTE_MIN(p->n_msgq_out,
+ RTE_DIM(p->msgq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_msgq_out; j++)
+ if (p->msgq_out[j] == pos)
+ n_writers++;
+ }
+
+ return n_writers;
+}
+
+static inline struct app_link_params *
+app_get_link_for_rxq(struct app_params *app, struct app_pktq_hwq_in_params *p)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ ssize_t link_param_idx;
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id, &rxq_queue_id);
+ sprintf(link_name, "LINK%" PRIu32, rxq_link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline struct app_link_params *
+app_get_link_for_txq(struct app_params *app, struct app_pktq_hwq_out_params *p)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ ssize_t link_param_idx;
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id, &txq_queue_id);
+ sprintf(link_name, "LINK%" PRIu32, txq_link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline struct app_link_params *
+app_get_link_for_tm(struct app_params *app, struct app_pktq_tm_params *p_tm)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ uint32_t link_id;
+ ssize_t link_param_idx;
+
+ sscanf(p_tm->name, "TM%" PRIu32, &link_id);
+ sprintf(link_name, "LINK%" PRIu32, link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p_tm->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+int app_config_init(struct app_params *app);
+
+int app_config_args(struct app_params *app,
+ int argc, char **argv);
+
+int app_config_preproc(struct app_params *app);
+
+int app_config_parse(struct app_params *app,
+ const char *file_name);
+
+int app_config_parse_tm(struct app_params *app);
+
+void app_config_save(struct app_params *app,
+ const char *file_name);
+
+int app_config_check(struct app_params *app);
+
+int app_init(struct app_params *app);
+
+int app_thread(void *arg);
+
+int app_pipeline_type_register(struct app_params *app,
+ struct pipeline_type *ptype);
+
+struct pipeline_type *app_pipeline_type_find(struct app_params *app,
+ char *name);
+
+void app_link_up_internal(struct app_params *app,
+ struct app_link_params *cp);
+
+void app_link_down_internal(struct app_params *app,
+ struct app_link_params *cp);
+
+#endif
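
The APP_PARAM_* macros and the static inline helpers above form the lookup API over the fixed-size, name-keyed parameter arrays that the configuration parser fills in. A minimal usage sketch follows (a hypothetical helper, assuming a struct app_params already populated from a .cfg file; it is not part of the application itself):

#include <stdio.h>
#include <inttypes.h>
#include "app.h"

/* Hypothetical helper: locate the "LINK0" entry and report its RX queues. */
static void
example_show_link0(struct app_params *app)
{
	struct app_link_params *link;
	uint32_t n_rxq;

	/* Builds the name "LINK0" and scans app->link_params for it */
	APP_PARAM_FIND_BY_ID(app->link_params, "LINK", 0, link);
	if (link == NULL)
		return; /* no [LINK0] section in the configuration file */

	/* RXQs are tied back to their link by parsing "RXQ<link>.<queue>" names */
	n_rxq = app_link_get_n_rxq(app, link);
	printf("%s: %" PRIu32 " RX queue(s)\n", link->name, n_rxq);
}
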
diff --git a/examples/ip_pipeline/config/edge_router_downstream.cfg b/examples/ip_pipeline/config/edge_router_downstream.cfg
new file mode 100644
index 00000000..85bbab8f
--- /dev/null
+++ b/examples/ip_pipeline/config/edge_router_downstream.cfg
@@ -0,0 +1,85 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+; An edge router typically sits between two networks such as the provider
+; core network and the provider access network. A typical packet processing
+; pipeline for the downstream traffic (i.e. traffic from core to access
+; network) contains the following functional blocks: Packet RX & Routing,
+; Traffic management and Packet TX. The input packets are assumed to be
+; IPv4, while the output packets are Q-in-Q IPv4.
+
+; A simple implementation for this functional pipeline is presented below.
+
+; Packet Rx & Traffic Management Packet Tx
+; Routing (Pass-Through) (Pass-Through)
+; _____________________ SWQ0 ______________________ SWQ4 _____________________
+; RXQ0.0 --->| |----->| |----->| |---> TXQ0.0
+; | | SWQ1 | | SWQ5 | |
+; RXQ1.0 --->| |----->| |----->| |---> TXQ1.0
+; | (P1) | SWQ2 | (P2) | SWQ6 | (P3) |
+; RXQ2.0 --->| |----->| |----->| |---> TXQ2.0
+; | | SWQ3 | | SWQ7 | |
+; RXQ3.0 --->| |----->| |----->| |---> TXQ3.0
+; |_____________________| |______________________| |_____________________|
+; | _|_ ^ _|_ ^ _|_ ^ _|_ ^
+; | |___|||___|||___|||___||
+; +--> SINK0 |___|||___|||___|||___||
+; (route miss) |__| |__| |__| |__|
+; TM0 TM1 TM2 TM3
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
+encap = ethernet_qinq
+qinq_sched = test
+ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+
+[PIPELINE2]
+type = PASS-THROUGH
+core = 2
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3 TM0 TM1 TM2 TM3
+pktq_out = TM0 TM1 TM2 TM3 SWQ4 SWQ5 SWQ6 SWQ7
+
+[PIPELINE3]
+type = PASS-THROUGH
+core = 3
+pktq_in = SWQ4 SWQ5 SWQ6 SWQ7
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+
+[MEMPOOL0]
+pool_size = 2M
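
Reading the PIPELINE2 section above together with the diagram, and assuming the pass-through pipeline's default 1:1 mapping of input port i to output port i, the wiring works out as:

    pktq_in  : SWQ0 SWQ1 SWQ2 SWQ3 TM0  TM1  TM2  TM3
    pktq_out : TM0  TM1  TM2  TM3  SWQ4 SWQ5 SWQ6 SWQ7
               (input i -> output i)

so the same core 2 both enqueues packets into the four traffic managers (SWQ0..SWQ3 -> TM0..TM3) and dequeues them again towards the TX stage (TM0..TM3 -> SWQ4..SWQ7).
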
diff --git a/examples/ip_pipeline/config/edge_router_downstream.sh b/examples/ip_pipeline/config/edge_router_downstream.sh
new file mode 100644
index 00000000..ce46beb5
--- /dev/null
+++ b/examples/ip_pipeline/config/edge_router_downstream.sh
@@ -0,0 +1,10 @@
+################################################################################
+# Routing: Ether QinQ, ARP off
+################################################################################
+p 1 route add default 4 #SINK0
+p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 256 257
+p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 258 259
+p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 260 261
+p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 262 263
+
+p 1 route ls
diff --git a/examples/ip_pipeline/config/edge_router_upstream.cfg b/examples/ip_pipeline/config/edge_router_upstream.cfg
new file mode 100644
index 00000000..a08c5cce
--- /dev/null
+++ b/examples/ip_pipeline/config/edge_router_upstream.cfg
@@ -0,0 +1,110 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; An edge router typically sits between two networks such as the provider
+; core network and the provider access network. A typical packet processing
+; pipeline for the upstream traffic (i.e. traffic from access to core
+; network) contains the following functional blocks: Packet RX & Firewall,
+; Flow classification, Metering, Routing and Packet TX. The input packets
+; are assumed to be Q-in-Q IPv4, while the output packets are MPLS IPv4
+; (with a variable number of labels per route).
+
+; A simple implementation for this functional pipeline is presented below.
+
+; Packet Rx & Pass-Through Flow-Classification Flow-Actions Routing
+; Firewall
+; __________ SWQ0 __________ SWQ4 __________ SWQ8 __________ SWQ12 __________
+; RXQ0.0 --->| |------>| |------>| |------>| |------>| |------> TXQ0.0
+; | | SWQ1 | | SWQ5 | | SWQ9 | | SWQ13 | |
+; RXQ1.0 --->| |------>| |------>| |------>| |------>| |------> TXQ1.0
+; | (P1) | SWQ2 | (P2) | SWQ6 | (P3) | SWQ10 | (P4) | SWQ14 | (P5) |
+; RXQ2.0 --->| |------>| |------>| |------>| |------>| |------> TXQ2.0
+; | | SWQ3 | | SWQ7 | | SWQ11 | | SWQ15 | |
+; RXQ3.0 --->| |------>| |------>| |------>| |------>| |------> TXQ3.0
+; |__________| |__________| |__________| |__________| |__________|
+; | | |
+; +--> SINK0 (Default) +--> SINK1 (Default) +--> SINK2 (Route Miss)
+
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
+n_rules = 4096
+pkt_type = qinq_ipv4
+
+[PIPELINE2]
+type = PASS-THROUGH
+core = 2
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
+pktq_out = SWQ4 SWQ5 SWQ6 SWQ7
+dma_size = 8
+dma_dst_offset = 128; mbuf (128)
+dma_src_offset = 268; mbuf (128) + headroom (128) + 1st ethertype offset (12) = 268
+dma_src_mask = 00000FFF00000FFF; qinq
+dma_hash_offset = 136; dma_dst_offset + dma_size = 136
+
+[PIPELINE3]
+type = FLOW_CLASSIFICATION
+core = 2
+pktq_in = SWQ4 SWQ5 SWQ6 SWQ7
+pktq_out = SWQ8 SWQ9 SWQ10 SWQ11 SINK1
+n_flows = 65536
+key_size = 8; dma_size
+key_offset = 128; dma_dst_offset
+hash_offset = 136; dma_hash_offset
+flowid_offset = 192; mbuf (128) + 64
+
+[PIPELINE4]
+type = FLOW_ACTIONS
+core = 3
+pktq_in = SWQ8 SWQ9 SWQ10 SWQ11
+pktq_out = SWQ12 SWQ13 SWQ14 SWQ15
+n_flows = 65536
+n_meters_per_flow = 1
+flow_id_offset = 192; flowid_offset
+ip_hdr_offset = 278; mbuf (128) + headroom (128) + ethernet (14) + qinq (8) = 278
+color_offset = 196; flowid_offset + sizeof(flow_id)
+
+[PIPELINE5]
+type = ROUTING
+core = 4
+pktq_in = SWQ12 SWQ13 SWQ14 SWQ15
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2
+encap = ethernet_mpls
+mpls_color_mark = yes
+ip_hdr_offset = 278; mbuf (128) + headroom (128) + ethernet (14) + qinq (8) = 278
+color_offset = 196; flowid_offset + sizeof(flow_id)
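
In the PIPELINE2 (PASS-THROUGH) section above, the dma_* parameters ask the pipeline to copy dma_size (8) bytes starting at the first Ethertype of the Q-in-Q header, mask them with dma_src_mask so that only the two 12-bit VLAN IDs remain, and store the resulting key and its hash in the mbuf metadata area, which is exactly where PIPELINE3 later reads key_offset (128) and hash_offset (136). A rough per-packet sketch of that operation (illustrative only, not the pipeline_passthrough_be.c implementation; rte_hash_crc() stands in for the application's hash function and signature width):

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_hash_crc.h>

#define DMA_SRC_OFFSET  268 /* mbuf (128) + headroom (128) + 1st ethertype (12) */
#define DMA_DST_OFFSET  128 /* start of the mbuf metadata area */
#define DMA_HASH_OFFSET 136 /* dma_dst_offset + dma_size (8) */

/* dma_src_mask = 00000FFF00000FFF: keep the two 12-bit VLAN IDs */
static const uint8_t qinq_mask[8] = {
	0x00, 0x00, 0x0F, 0xFF, 0x00, 0x00, 0x0F, 0xFF
};

static void
example_qinq_key(struct rte_mbuf *pkt)
{
	uint8_t *m = (uint8_t *) pkt;      /* offsets count from the mbuf start */
	uint8_t *key = m + DMA_DST_OFFSET;
	uint32_t i;

	/* Copy and mask the 8 bytes covering the S-VLAN and C-VLAN tags */
	for (i = 0; i < 8; i++)
		key[i] = m[DMA_SRC_OFFSET + i] & qinq_mask[i];

	/* Pre-compute the hash that the flow classifier reads at hash_offset */
	*(uint32_t *)(m + DMA_HASH_OFFSET) = rte_hash_crc(key, 8, 0);
}
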
diff --git a/examples/ip_pipeline/config/edge_router_upstream.sh b/examples/ip_pipeline/config/edge_router_upstream.sh
new file mode 100644
index 00000000..eeba600c
--- /dev/null
+++ b/examples/ip_pipeline/config/edge_router_upstream.sh
@@ -0,0 +1,38 @@
+################################################################################
+# Firewall rules: 4 rules, one per output port
+################################################################################
+p 1 firewall add ipv4 1 0.0.0.0 8 0.0.0.0 10 0 0 0 0 6 1 0
+p 1 firewall add ipv4 1 0.0.0.0 8 0.64.0.0 10 0 0 0 0 6 1 1
+p 1 firewall add ipv4 1 0.0.0.0 8 0.128.0.0 10 0 0 0 0 6 1 2
+p 1 firewall add ipv4 1 0.0.0.0 8 0.192.0.0 10 0 0 0 0 6 1 3
+p 1 firewall add default 4 #SINK0
+
+
+################################################################################
+# Flow classification
+################################################################################
+p 3 flow add default 4 #SINK1
+p 3 flow add qinq all 65536 4
+
+################################################################################
+# Flow Actions - Metering
+################################################################################
+p 4 flows 65536 meter 0 trtcm 1250000000 1250000000 100000000 100000000
+p 4 flows 65536 ports 4
+
+################################################################################
+# Routing: Ether MPLS, ARP off
+################################################################################
+p 5 route add default 4 #SINK2
+p 5 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 0:1
+p 5 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 10:11
+p 5 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 20:21
+p 5 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 30:31
+
+################################################################################
+# List all configurations
+################################################################################
+p 1 firewall ls
+#p 3 flow ls
+#p 4 flow actions ls
+p 5 route ls
diff --git a/examples/ip_pipeline/config/ip_pipeline.cfg b/examples/ip_pipeline/config/ip_pipeline.cfg
new file mode 100644
index 00000000..095ed25e
--- /dev/null
+++ b/examples/ip_pipeline/config/ip_pipeline.cfg
@@ -0,0 +1,9 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
diff --git a/examples/ip_pipeline/config/ip_pipeline.sh b/examples/ip_pipeline/config/ip_pipeline.sh
new file mode 100644
index 00000000..4fca2597
--- /dev/null
+++ b/examples/ip_pipeline/config/ip_pipeline.sh
@@ -0,0 +1,5 @@
+#
+#run config/ip_pipeline.sh
+#
+
+p 1 ping
diff --git a/examples/ip_pipeline/config/l2fwd.cfg b/examples/ip_pipeline/config/l2fwd.cfg
new file mode 100644
index 00000000..c743a143
--- /dev/null
+++ b/examples/ip_pipeline/config/l2fwd.cfg
@@ -0,0 +1,55 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+;
+; The pass-through pipeline below connects the input ports to the output ports
+; as follows: RXQ0.0 -> TXQ1.0, RXQ1.0 -> TXQ0.0, RXQ2.0 -> TXQ3.0 and
+; RXQ3.0 -> TXQ2.0.
+; ________________
+; RXQ0.0 --->|................|---> TXQ1.0
+; | |
+; RXQ1.0 --->|................|---> TXQ0.0
+; | Pass-through |
+; RXQ2.0 --->|................|---> TXQ3.0
+; | |
+; RXQ3.0 --->|................|---> TXQ2.0
+; |________________|
+;
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ1.0 TXQ0.0 TXQ3.0 TXQ2.0
diff --git a/examples/ip_pipeline/config/l3fwd.cfg b/examples/ip_pipeline/config/l3fwd.cfg
new file mode 100644
index 00000000..5449dc32
--- /dev/null
+++ b/examples/ip_pipeline/config/l3fwd.cfg
@@ -0,0 +1,63 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Routing |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (route miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+encap = ethernet; encap = ethernet / ethernet_qinq / ethernet_mpls
+ip_hdr_offset = 270
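
The packet buffer layout table above is what ip_hdr_offset = 270 encodes: the IPv4 header begins 270 bytes from the start of the rte_mbuf structure. A minimal sketch of that address calculation (illustrative only; the routing pipeline performs it internally, assuming the default 128-byte mbuf header and 128-byte headroom):

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

static struct ipv4_hdr *
example_ip_hdr(struct rte_mbuf *pkt)
{
	/* 128 (mbuf) + 128 (headroom) + 14 (Ethernet header) = 270 */
	uint32_t ip_hdr_offset = sizeof(struct rte_mbuf) +
		RTE_PKTMBUF_HEADROOM + sizeof(struct ether_hdr);

	return (struct ipv4_hdr *)((uint8_t *) pkt + ip_hdr_offset);
}
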
diff --git a/examples/ip_pipeline/config/l3fwd.sh b/examples/ip_pipeline/config/l3fwd.sh
new file mode 100644
index 00000000..27740103
--- /dev/null
+++ b/examples/ip_pipeline/config/l3fwd.sh
@@ -0,0 +1,9 @@
+################################################################################
+# Routing: encap = ethernet, arp = off
+################################################################################
+p 1 route add default 4 #SINK0
+p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
+p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1
+p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2
+p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3
+p 1 route ls
diff --git a/examples/ip_pipeline/config/tm_profile.cfg b/examples/ip_pipeline/config/tm_profile.cfg
new file mode 100644
index 00000000..2dfb215e
--- /dev/null
+++ b/examples/ip_pipeline/config/tm_profile.cfg
@@ -0,0 +1,105 @@
+; BSD LICENSE
+;
+; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; This file enables the following hierarchical scheduler configuration for each
+; 10GbE output port:
+; * Single subport (subport 0):
+; - Subport rate set to 100% of port rate
+; - Each of the 4 traffic classes has rate set to 100% of port rate
+; * 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
+; - Pipe rate set to 1/4K of port rate
+; - Each of the 4 traffic classes has rate set to 100% of pipe rate
+; - Within each traffic class, the byte-level WRR weights for the 4 queues
+; are set to 1:1:1:1
+;
+; For more details, please refer to the "Quality of Service (QoS) Framework"
+; chapter of the Data Plane Development Kit (DPDK) Programmer's Guide.
+
+; Port configuration
+[port]
+frame overhead = 24 ; frame overhead = Preamble (7) + SFD (1) + FCS (4) + IFG (12)
+mtu = 1522; mtu = Q-in-Q MTU (FCS not included)
+number of subports per port = 1
+number of pipes per subport = 4096
+queue sizes = 64 64 64 64
+
+; Subport configuration
+[subport 0]
+tb rate = 1250000000 ; Bytes per second
+tb size = 1000000 ; Bytes
+
+tc 0 rate = 1250000000 ; Bytes per second
+tc 1 rate = 1250000000 ; Bytes per second
+tc 2 rate = 1250000000 ; Bytes per second
+tc 3 rate = 1250000000 ; Bytes per second
+tc period = 10 ; Milliseconds
+
+pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0
+
+; Pipe configuration
+[pipe profile 0]
+tb rate = 305175 ; Bytes per second
+tb size = 1000000 ; Bytes
+
+tc 0 rate = 305175 ; Bytes per second
+tc 1 rate = 305175 ; Bytes per second
+tc 2 rate = 305175 ; Bytes per second
+tc 3 rate = 305175 ; Bytes per second
+tc period = 40 ; Milliseconds
+
+tc 3 oversubscription weight = 1
+
+tc 0 wrr weights = 1 1 1 1
+tc 1 wrr weights = 1 1 1 1
+tc 2 wrr weights = 1 1 1 1
+tc 3 wrr weights = 1 1 1 1
+
+; RED params per traffic class and color (Green / Yellow / Red)
+[red]
+tc 0 wred min = 48 40 32
+tc 0 wred max = 64 64 64
+tc 0 wred inv prob = 10 10 10
+tc 0 wred weight = 9 9 9
+
+tc 1 wred min = 48 40 32
+tc 1 wred max = 64 64 64
+tc 1 wred inv prob = 10 10 10
+tc 1 wred weight = 9 9 9
+
+tc 2 wred min = 48 40 32
+tc 2 wred max = 64 64 64
+tc 2 wred inv prob = 10 10 10
+tc 2 wred weight = 9 9 9
+
+tc 3 wred min = 48 40 32
+tc 3 wred max = 64 64 64
+tc 3 wred inv prob = 10 10 10
+tc 3 wred weight = 9 9 9
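
The token bucket rates above follow from the comment block at the top of the file (10GbE port, a single subport at 100% of the port rate, 4096 pipes each at 1/4K of the port rate):

    port/subport rate = 10 Gbit/s / 8        = 1,250,000,000 bytes/s
    pipe rate         = 1,250,000,000 / 4096 ≈ 305,175 bytes/s

Each traffic class rate is then set to 100% of its parent subport or pipe rate, which is why the subport tc rates repeat 1250000000 and the pipe profile tc rates repeat 305175.
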
diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c
new file mode 100644
index 00000000..fd9ff495
--- /dev/null
+++ b/examples/ip_pipeline/config_check.c
@@ -0,0 +1,444 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+
+#include <rte_ip.h>
+
+#include "app.h"
+
+static void
+check_mempools(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_mempools; i++) {
+ struct app_mempool_params *p = &app->mempool_params[i];
+
+ APP_CHECK((p->pool_size > 0),
+ "Mempool %s size is 0\n", p->name);
+
+ APP_CHECK((p->cache_size > 0),
+ "Mempool %s cache size is 0\n", p->name);
+
+ APP_CHECK(rte_is_power_of_2(p->cache_size),
+ "Mempool %s cache size not a power of 2\n", p->name);
+ }
+}
+
+static void
+check_links(struct app_params *app)
+{
+ uint32_t i;
+
+ /* Check that number of links matches the port mask */
+ if (app->port_mask) {
+ uint32_t n_links_port_mask =
+ __builtin_popcountll(app->port_mask);
+
+ APP_CHECK((app->n_links == n_links_port_mask),
+ "Not enough links provided in the PORT_MASK\n");
+ }
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+ uint32_t rxq_max, n_rxq, n_txq, link_id, i;
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ /* Check that link RXQs are contiguous */
+ rxq_max = 0;
+ if (link->arp_q > rxq_max)
+ rxq_max = link->arp_q;
+ if (link->tcp_syn_q > rxq_max)
+ rxq_max = link->tcp_syn_q;
+ if (link->ip_local_q > rxq_max)
+ rxq_max = link->ip_local_q;
+ if (link->tcp_local_q > rxq_max)
+ rxq_max = link->tcp_local_q;
+ if (link->udp_local_q > rxq_max)
+ rxq_max = link->udp_local_q;
+ if (link->sctp_local_q > rxq_max)
+ rxq_max = link->sctp_local_q;
+
+ for (i = 1; i <= rxq_max; i++)
+ APP_CHECK(((link->arp_q == i) ||
+ (link->tcp_syn_q == i) ||
+ (link->ip_local_q == i) ||
+ (link->tcp_local_q == i) ||
+ (link->udp_local_q == i) ||
+ (link->sctp_local_q == i)),
+ "%s RXQs are not contiguous (A)\n", link->name);
+
+ n_rxq = app_link_get_n_rxq(app, link);
+
+ APP_CHECK((n_rxq), "%s does not have any RXQ\n", link->name);
+
+ APP_CHECK((n_rxq == rxq_max + 1),
+ "%s RXQs are not contiguous (B)\n", link->name);
+
+ for (i = 0; i < n_rxq; i++) {
+ char name[APP_PARAM_NAME_SIZE];
+ int pos;
+
+ sprintf(name, "RXQ%" PRIu32 ".%" PRIu32,
+ link_id, i);
+ pos = APP_PARAM_FIND(app->hwq_in_params, name);
+ APP_CHECK((pos >= 0),
+ "%s RXQs are not contiguous (C)\n", link->name);
+ }
+
+		/* Check that link TXQs are contiguous */
+ n_txq = app_link_get_n_txq(app, link);
+
+ APP_CHECK((n_txq), "%s does not have any TXQ\n", link->name);
+
+ for (i = 0; i < n_txq; i++) {
+ char name[APP_PARAM_NAME_SIZE];
+ int pos;
+
+ sprintf(name, "TXQ%" PRIu32 ".%" PRIu32,
+ link_id, i);
+ pos = APP_PARAM_FIND(app->hwq_out_params, name);
+ APP_CHECK((pos >= 0),
+ "%s TXQs are not contiguous\n", link->name);
+ }
+ }
+}
+
+static void
+check_rxqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_hwq_in; i++) {
+ struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
+ uint32_t n_readers = app_rxq_get_readers(app, p);
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst > 0),
+ "%s burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst <= p->size),
+ "%s burst size is bigger than its size\n", p->name);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+ }
+}
+
+static void
+check_txqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
+ uint32_t n_writers = app_txq_get_writers(app, p);
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst > 0),
+ "%s burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst <= p->size),
+ "%s burst size is bigger than its size\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_swqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+ uint32_t n_readers = app_swq_get_readers(app, p);
+ uint32_t n_writers = app_swq_get_writers(app, p);
+ uint32_t n_flags;
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst_read > 0),
+ "%s read burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst_read <= p->size),
+ "%s read burst size is bigger than its size\n",
+ p->name);
+
+ APP_CHECK((p->burst_write > 0),
+ "%s write burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst_write <= p->size),
+ "%s write burst size is bigger than its size\n",
+ p->name);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ if (n_readers > 1)
+ APP_LOG(app, LOW, "%s has more than one reader", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ if (n_writers > 1)
+ APP_LOG(app, LOW, "%s has more than one writer", p->name);
+
+ n_flags = p->ipv4_frag + p->ipv6_frag + p->ipv4_ras + p->ipv6_ras;
+
+ APP_CHECK((n_flags < 2),
+ "%s has more than one fragmentation or reassembly mode enabled\n",
+ p->name);
+
+ APP_CHECK((!((n_readers > 1) && (n_flags == 1))),
+ "%s has more than one reader when fragmentation or reassembly"
+ " mode enabled\n",
+ p->name);
+
+ APP_CHECK((!((n_writers > 1) && (n_flags == 1))),
+ "%s has more than one writer when fragmentation or reassembly"
+ " mode enabled\n",
+ p->name);
+
+ n_flags = p->ipv4_ras + p->ipv6_ras;
+
+ APP_CHECK((!((p->dropless == 1) && (n_flags == 1))),
+ "%s has dropless when reassembly mode enabled\n", p->name);
+
+ n_flags = p->ipv4_frag + p->ipv6_frag;
+
+ if (n_flags == 1) {
+ uint16_t ip_hdr_size = (p->ipv4_frag) ? sizeof(struct ipv4_hdr) :
+ sizeof(struct ipv6_hdr);
+
+ APP_CHECK((p->mtu > ip_hdr_size),
+ "%s mtu size is smaller than ip header\n", p->name);
+
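+			/*
+			 * IP fragmentation expresses the fragment offset in
+			 * 8-byte units, so the per-fragment payload (mtu
+			 * minus the IP header) must be a multiple of 8.
+			 */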
+ APP_CHECK((!((p->mtu - ip_hdr_size) % 8)),
+ "%s mtu size is incorrect\n", p->name);
+ }
+ }
+}
+
+static void
+check_tms(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tm; i++) {
+ struct app_pktq_tm_params *p = &app->tm_params[i];
+ uint32_t n_readers = app_tm_get_readers(app, p);
+ uint32_t n_writers = app_tm_get_writers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_sources(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_source; i++) {
+ struct app_pktq_source_params *p = &app->source_params[i];
+ uint32_t n_readers = app_source_get_readers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+ }
+}
+
+static void
+check_sinks(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_sink; i++) {
+ struct app_pktq_sink_params *p = &app->sink_params[i];
+ uint32_t n_writers = app_sink_get_writers(app, p);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_msgqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_msgq; i++) {
+ struct app_msgq_params *p = &app->msgq_params[i];
+ uint32_t n_readers = app_msgq_get_readers(app, p);
+ uint32_t n_writers = app_msgq_get_writers(app, p);
+ uint32_t msgq_req_pipeline, msgq_rsp_pipeline;
+ uint32_t msgq_req_core, msgq_rsp_core;
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ msgq_req_pipeline = (strncmp(p->name, "MSGQ-REQ-PIPELINE",
+ strlen("MSGQ-REQ-PIPELINE")) == 0);
+
+ msgq_rsp_pipeline = (strncmp(p->name, "MSGQ-RSP-PIPELINE",
+ strlen("MSGQ-RSP-PIPELINE")) == 0);
+
+ msgq_req_core = (strncmp(p->name, "MSGQ-REQ-CORE",
+ strlen("MSGQ-REQ-CORE")) == 0);
+
+ msgq_rsp_core = (strncmp(p->name, "MSGQ-RSP-CORE",
+ strlen("MSGQ-RSP-CORE")) == 0);
+
+ if ((msgq_req_pipeline == 0) &&
+ (msgq_rsp_pipeline == 0) &&
+ (msgq_req_core == 0) &&
+ (msgq_rsp_core == 0)) {
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+
+ if (msgq_req_pipeline) {
+ struct app_pipeline_params *pipeline;
+ uint32_t pipeline_id;
+
+ APP_PARAM_GET_ID(p, "MSGQ-REQ-PIPELINE", pipeline_id);
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params,
+ "PIPELINE",
+ pipeline_id,
+ pipeline);
+
+ APP_CHECK((pipeline != NULL),
+ "%s is not associated with a valid pipeline\n",
+ p->name);
+ }
+
+ if (msgq_rsp_pipeline) {
+ struct app_pipeline_params *pipeline;
+ uint32_t pipeline_id;
+
+ APP_PARAM_GET_ID(p, "MSGQ-RSP-PIPELINE", pipeline_id);
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params,
+ "PIPELINE",
+ pipeline_id,
+ pipeline);
+
+ APP_CHECK((pipeline != NULL),
+ "%s is not associated with a valid pipeline\n",
+ p->name);
+ }
+ }
+}
+
+static void
+check_pipelines(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+
+ APP_CHECK((p->n_msgq_in == p->n_msgq_out),
+ "%s number of input MSGQs does not match "
+ "the number of output MSGQs\n", p->name);
+ }
+}
+
+int
+app_config_check(struct app_params *app)
+{
+ check_mempools(app);
+ check_links(app);
+ check_rxqs(app);
+ check_txqs(app);
+ check_swqs(app);
+ check_tms(app);
+ check_sources(app);
+ check_sinks(app);
+ check_msgqs(app);
+ check_pipelines(app);
+
+ return 0;
+}
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
new file mode 100644
index 00000000..e5efd03e
--- /dev/null
+++ b/examples/ip_pipeline/config_parse.c
@@ -0,0 +1,3383 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+#include "parser.h"
+
+/**
+ * Default config values
+ **/
+
+static struct app_params app_params_default = {
+ .config_file = "./config/ip_pipeline.cfg",
+ .log_level = APP_LOG_LEVEL_HIGH,
+ .port_mask = 0,
+
+ .eal_params = {
+ .channels = 4,
+ },
+};
+
+static const struct app_mempool_params mempool_params_default = {
+ .parsed = 0,
+ .buffer_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .cache_size = 256,
+ .cpu_socket_id = 0,
+};
+
+static const struct app_link_params link_params_default = {
+ .parsed = 0,
+ .pmd_id = 0,
+ .arp_q = 0,
+ .tcp_syn_q = 0,
+ .ip_local_q = 0,
+ .tcp_local_q = 0,
+ .udp_local_q = 0,
+ .sctp_local_q = 0,
+ .state = 0,
+ .ip = 0,
+ .depth = 0,
+ .mac_addr = 0,
+ .pci_bdf = {0},
+
+ .conf = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+
+ .header_split = 0, /* Header split */
+ .hw_ip_checksum = 0, /* IP checksum offload */
+ .hw_vlan_filter = 0, /* VLAN filtering */
+ .hw_vlan_strip = 0, /* VLAN strip */
+ .hw_vlan_extend = 0, /* Extended VLAN */
+ .jumbo_frame = 0, /* Jumbo frame support */
+ .hw_strip_crc = 0, /* CRC strip by HW */
+ .enable_scatter = 0, /* Scattered packets RX handler */
+
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+ .lpbk_mode = 0,
+ },
+
+ .promisc = 1,
+};
+
+static const struct app_pktq_hwq_in_params default_hwq_in_params = {
+ .parsed = 0,
+ .mempool_id = 0,
+ .size = 128,
+ .burst = 32,
+
+ .conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+ .rx_deferred_start = 0,
+ }
+};
+
+static const struct app_pktq_hwq_out_params default_hwq_out_params = {
+ .parsed = 0,
+ .size = 512,
+ .burst = 32,
+ .dropless = 0,
+ .n_retries = 0,
+
+ .conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_rs_thresh = 0,
+ .tx_free_thresh = 0,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ .tx_deferred_start = 0,
+ }
+};
+
+static const struct app_pktq_swq_params default_swq_params = {
+ .parsed = 0,
+ .size = 256,
+ .burst_read = 32,
+ .burst_write = 32,
+ .dropless = 0,
+ .n_retries = 0,
+ .cpu_socket_id = 0,
+ .ipv4_frag = 0,
+ .ipv6_frag = 0,
+ .ipv4_ras = 0,
+ .ipv6_ras = 0,
+ .mtu = 0,
+ .metadata_size = 0,
+ .mempool_direct_id = 0,
+ .mempool_indirect_id = 0,
+};
+
+struct app_pktq_tm_params default_tm_params = {
+ .parsed = 0,
+ .file_name = "./config/tm_profile.cfg",
+ .burst_read = 64,
+ .burst_write = 32,
+};
+
+struct app_pktq_source_params default_source_params = {
+ .parsed = 0,
+ .mempool_id = 0,
+ .burst = 32,
+ .file_name = NULL,
+ .n_bytes_per_pkt = 0,
+};
+
+struct app_pktq_sink_params default_sink_params = {
+ .parsed = 0,
+ .file_name = NULL,
+ .n_pkts_to_dump = 0,
+};
+
+struct app_msgq_params default_msgq_params = {
+ .parsed = 0,
+ .size = 64,
+ .cpu_socket_id = 0,
+};
+
+struct app_pipeline_params default_pipeline_params = {
+ .parsed = 0,
+ .socket_id = 0,
+ .core_id = 0,
+ .hyper_th_id = 0,
+ .n_pktq_in = 0,
+ .n_pktq_out = 0,
+ .n_msgq_in = 0,
+ .n_msgq_out = 0,
+ .timer_period = 1,
+ .n_args = 0,
+};
+
+static const char app_usage[] =
+ "Usage: %s [-f CONFIG_FILE] [-s SCRIPT_FILE] [-p PORT_MASK] "
+ "[-l LOG_LEVEL] [--preproc PREPROCESSOR] [--preproc-args ARGS]\n"
+ "\n"
+ "Arguments:\n"
+ "\t-f CONFIG_FILE: Default config file is %s\n"
+ "\t-p PORT_MASK: Mask of NIC port IDs in hex format (generated from "
+ "config file when not provided)\n"
+ "\t-s SCRIPT_FILE: No CLI script file is run when not specified\n"
+ "\t-l LOG_LEVEL: 0 = NONE, 1 = HIGH PRIO (default), 2 = LOW PRIO\n"
+ "\t--preproc PREPROCESSOR: Configuration file pre-processor\n"
+ "\t--preproc-args ARGS: Arguments to be passed to pre-processor\n"
+ "\n";
+
+static void
+app_print_usage(char *prgname)
+{
+ rte_exit(0, app_usage, prgname, app_params_default.config_file);
+}
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++); \
+ _p; \
+})
+
+#define PARSER_PARAM_ADD_CHECK(result, params_array, section_name) \
+do { \
+ APP_CHECK((result != -EINVAL), \
+ "Parse error: no free memory"); \
+ APP_CHECK((result != -ENOMEM), \
+ "Parse error: too many \"%s\" sections", section_name); \
+ APP_CHECK(((result >= 0) && (params_array)[result].parsed == 0),\
+ "Parse error: duplicate \"%s\" section", section_name); \
+ APP_CHECK((result >= 0), \
+ "Parse error in section \"%s\"", section_name); \
+} while (0)
+
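+/*
+ * Parse a boolean argument: returns 1 for "yes"/"YES" or "on"/"ON", 0 for
+ * "no"/"NO" or "off"/"OFF" (all-lowercase or all-uppercase only), and
+ * -EINVAL for anything else.
+ */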
+int
+parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+#define PARSE_ERROR(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"\n", section, entry)
+
+#define PARSE_ERROR_MESSAGE(exp, section, entry, message) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s\n", \
+ section, entry, message)
+
+
+#define PARSE_ERROR_MALLOC(exp) \
+APP_CHECK(exp, "Parse error: no free memory\n")
+
+#define PARSE_ERROR_SECTION(exp, section) \
+APP_CHECK(exp, "Parse error in section \"%s\"", section)
+
+#define PARSE_ERROR_SECTION_NO_ENTRIES(exp, section) \
+APP_CHECK(exp, "Parse error in section \"%s\": no entries\n", section)
+
+#define PARSE_WARNING_IGNORED(exp, section, entry) \
+do \
+if (!(exp)) \
+ fprintf(stderr, "Parse warning in section \"%s\": " \
+ "entry \"%s\" is ignored\n", section, entry); \
+while (0)
+
+#define PARSE_ERROR_INVALID(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"\n",\
+ section, entry)
+
+#define PARSE_ERROR_DUPLICATE(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"\n",\
+ section, entry)
+
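+/*
+ * Parse an unsigned decimal number with an optional k/K, M, G or T suffix,
+ * each suffix step multiplying by 1024 (binary units): e.g. "16K" yields
+ * 16384 and "2M" yields 2097152. Only the kilo suffix has a lowercase form.
+ */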
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
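+/*
+ * Parse the pipeline "core" entry: either a plain core id (e.g. "2") or a
+ * socket/core/hyper-thread spec given in that order, e.g. "s0c1" (socket 0,
+ * core 1) or "s1c2h" (socket 1, core 2, hyper-thread sibling).
+ */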
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+		/* If everything was parsed, nothing should be left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+			/* If it starts with a digit, it is the core id only. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+ if (num_len == RTE_DIM(num))
+ return -EINVAL;
+
+ if (!isdigit(*next))
+ break;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ *socket = s;
+ *core = c;
+ *ht = h;
+ return 0;
+}
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
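+/*
+ * Convert a hex string (no "0x" prefix, length a multiple of four digits,
+ * at most 2 * (*size) digits) into bytes; on success *size is set to the
+ * number of bytes written, e.g. "DEADBEEF" -> {0xDE, 0xAD, 0xBE, 0xEF} and
+ * *size = 4.
+ */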
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
+static size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++);
+
+ return i;
+}
+
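+/*
+ * Check a name against the "<prefix><number>" pattern: num == 0 requires an
+ * exact match of the prefix, num == 1 requires the prefix followed by one
+ * number (e.g. "SWQ5"), num == 2 requires the prefix, a number and a dot
+ * (e.g. "RXQ0.1").
+ */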
+static int
+validate_name(const char *name, const char *prefix, int num)
+{
+ size_t i, j;
+
+ for (i = 0; (name[i] != '\0') && (prefix[i] != '\0'); i++) {
+ if (name[i] != prefix[i])
+ return -1;
+ }
+
+ if (prefix[i] != '\0')
+ return -1;
+
+ if (!num) {
+ if (name[i] != '\0')
+ return -1;
+ else
+ return 0;
+ }
+
+ if (num == 2) {
+ j = skip_digits(&name[i]);
+ i += j;
+ if ((j == 0) || (name[i] != '.'))
+ return -1;
+ i++;
+ }
+
+ if (num == 1) {
+ j = skip_digits(&name[i]);
+ i += j;
+ if ((j == 0) || (name[i] != '\0'))
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+parse_eal(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_eal_params *p = &app->eal_params;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *entry = &entries[i];
+
+ /* coremask */
+ if (strcmp(entry->name, "c") == 0) {
+ PARSE_WARNING_IGNORED(0, section_name, entry->name);
+ continue;
+ }
+
+ /* corelist */
+ if (strcmp(entry->name, "l") == 0) {
+ PARSE_WARNING_IGNORED(0, section_name, entry->name);
+ continue;
+ }
+
+ /* coremap */
+ if (strcmp(entry->name, "lcores") == 0) {
+ PARSE_ERROR_DUPLICATE((p->coremap == NULL),
+ section_name,
+ entry->name);
+ p->coremap = strdup(entry->value);
+ continue;
+ }
+
+ /* master_lcore */
+ if (strcmp(entry->name, "master_lcore") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->master_lcore_present == 0),
+ section_name,
+ entry->name);
+ p->master_lcore_present = 1;
+
+ status = parser_read_uint32(&p->master_lcore,
+ entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* channels */
+ if (strcmp(entry->name, "n") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->channels_present == 0),
+ section_name,
+ entry->name);
+ p->channels_present = 1;
+
+ status = parser_read_uint32(&p->channels, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* memory */
+ if (strcmp(entry->name, "m") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->memory_present == 0),
+ section_name,
+ entry->name);
+ p->memory_present = 1;
+
+ status = parser_read_uint32(&p->memory, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* ranks */
+ if (strcmp(entry->name, "r") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->ranks_present == 0),
+ section_name,
+ entry->name);
+ p->ranks_present = 1;
+
+ status = parser_read_uint32(&p->ranks, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* pci_blacklist */
+ if ((strcmp(entry->name, "pci_blacklist") == 0) ||
+ (strcmp(entry->name, "b") == 0)) {
+ uint32_t i;
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i])
+ continue;
+
+ p->pci_blacklist[i] =
+ strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->pci_blacklist[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* pci_whitelist */
+ if ((strcmp(entry->name, "pci_whitelist") == 0) ||
+ (strcmp(entry->name, "w") == 0)) {
+ uint32_t i;
+
+ PARSE_ERROR_MESSAGE((app->port_mask != 0),
+				section_name, entry->name, "this entry is "
+				"generated by the application when "
+				"port_mask is not provided");
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i])
+ continue;
+
+ p->pci_whitelist[i] = strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->pci_whitelist[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* vdev */
+ if (strcmp(entry->name, "vdev") == 0) {
+ uint32_t i;
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i])
+ continue;
+
+ p->vdev[i] = strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->vdev[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* vmware_tsc_map */
+ if (strcmp(entry->name, "vmware_tsc_map") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->vmware_tsc_map_present == 0),
+ section_name,
+ entry->name);
+ p->vmware_tsc_map_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->vmware_tsc_map = val;
+ continue;
+ }
+
+ /* proc_type */
+ if (strcmp(entry->name, "proc_type") == 0) {
+ PARSE_ERROR_DUPLICATE((p->proc_type == NULL),
+ section_name,
+ entry->name);
+ p->proc_type = strdup(entry->value);
+ continue;
+ }
+
+ /* syslog */
+ if (strcmp(entry->name, "syslog") == 0) {
+ PARSE_ERROR_DUPLICATE((p->syslog == NULL),
+ section_name,
+ entry->name);
+ p->syslog = strdup(entry->value);
+ continue;
+ }
+
+ /* log_level */
+ if (strcmp(entry->name, "log_level") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->log_level_present == 0),
+ section_name,
+ entry->name);
+ p->log_level_present = 1;
+
+ status = parser_read_uint32(&p->log_level,
+ entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* version */
+ if (strcmp(entry->name, "v") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->version_present == 0),
+ section_name,
+ entry->name);
+ p->version_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->version = val;
+ continue;
+ }
+
+ /* help */
+ if ((strcmp(entry->name, "help") == 0) ||
+ (strcmp(entry->name, "h") == 0)) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->help_present == 0),
+ section_name,
+ entry->name);
+ p->help_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->help = val;
+ continue;
+ }
+
+ /* no_huge */
+ if (strcmp(entry->name, "no_huge") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_huge_present == 0),
+ section_name,
+ entry->name);
+ p->no_huge_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_huge = val;
+ continue;
+ }
+
+ /* no_pci */
+ if (strcmp(entry->name, "no_pci") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_pci_present == 0),
+ section_name,
+ entry->name);
+ p->no_pci_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_pci = val;
+ continue;
+ }
+
+ /* no_hpet */
+ if (strcmp(entry->name, "no_hpet") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_hpet_present == 0),
+ section_name,
+ entry->name);
+ p->no_hpet_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_hpet = val;
+ continue;
+ }
+
+ /* no_shconf */
+ if (strcmp(entry->name, "no_shconf") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_shconf_present == 0),
+ section_name,
+ entry->name);
+ p->no_shconf_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_shconf = val;
+ continue;
+ }
+
+ /* add_driver */
+ if (strcmp(entry->name, "d") == 0) {
+ PARSE_ERROR_DUPLICATE((p->add_driver == NULL),
+ section_name,
+ entry->name);
+ p->add_driver = strdup(entry->value);
+ continue;
+ }
+
+ /* socket_mem */
+ if (strcmp(entry->name, "socket_mem") == 0) {
+ PARSE_ERROR_DUPLICATE((p->socket_mem == NULL),
+ section_name,
+ entry->name);
+ p->socket_mem = strdup(entry->value);
+ continue;
+ }
+
+ /* huge_dir */
+ if (strcmp(entry->name, "huge_dir") == 0) {
+ PARSE_ERROR_DUPLICATE((p->huge_dir == NULL),
+ section_name,
+ entry->name);
+ p->huge_dir = strdup(entry->value);
+ continue;
+ }
+
+ /* file_prefix */
+ if (strcmp(entry->name, "file_prefix") == 0) {
+ PARSE_ERROR_DUPLICATE((p->file_prefix == NULL),
+ section_name,
+ entry->name);
+ p->file_prefix = strdup(entry->value);
+ continue;
+ }
+
+ /* base_virtaddr */
+ if (strcmp(entry->name, "base_virtaddr") == 0) {
+ PARSE_ERROR_DUPLICATE((p->base_virtaddr == NULL),
+ section_name,
+ entry->name);
+ p->base_virtaddr = strdup(entry->value);
+ continue;
+ }
+
+ /* create_uio_dev */
+ if (strcmp(entry->name, "create_uio_dev") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->create_uio_dev_present == 0),
+ section_name,
+ entry->name);
+ p->create_uio_dev_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->create_uio_dev = val;
+ continue;
+ }
+
+ /* vfio_intr */
+ if (strcmp(entry->name, "vfio_intr") == 0) {
+ PARSE_ERROR_DUPLICATE((p->vfio_intr == NULL),
+ section_name,
+ entry->name);
+ p->vfio_intr = strdup(entry->value);
+ continue;
+ }
+
+ /* xen_dom0 */
+ if (strcmp(entry->name, "xen_dom0") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->xen_dom0_present == 0),
+ section_name,
+ entry->name);
+ p->xen_dom0_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->xen_dom0 = val;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, entry->name);
+ }
+
+ free(entries);
+}
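+
+/*
+ * Example [EAL] section (editorial illustration, values are hypothetical):
+ *	n = 4
+ *	socket_mem = 1024,1024
+ * The "c" and "l" entries are ignored with a warning; any entry not handled
+ * above is a parse error.
+ */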
+
+static int
+parse_pipeline_pcap_source(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *file_name, const char *cp_size)
+{
+ const char *next = NULL;
+ char *end;
+ uint32_t i;
+ int parse_file = 0;
+
+ if (file_name && !cp_size) {
+ next = file_name;
+ parse_file = 1; /* parse file path */
+ } else if (cp_size && !file_name) {
+ next = cp_size;
+ parse_file = 0; /* parse copy size */
+ } else
+ return -EINVAL;
+
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ if (p->n_pktq_in == 0)
+ return -EINVAL;
+
+ i = 0;
+ while (*next != '\0') {
+ uint32_t id;
+
+ if (i >= p->n_pktq_in)
+ return -EINVAL;
+
+ id = p->pktq_in[i].id;
+
+ end = strchr(next, ' ');
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+		if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (parse_file) {
+ app->source_params[id].file_name = strdup(name);
+ if (app->source_params[id].file_name == NULL)
+ return -ENOMEM;
+ } else {
+ if (parser_read_uint32(
+ &app->source_params[id].n_bytes_per_pkt,
+ name) != 0) {
+				if (app->source_params[id].file_name != NULL)
+					free(app->source_params[id].file_name);
+ return -EINVAL;
+ }
+ }
+
+ i++;
+
+ if (i == p->n_pktq_in)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+parse_pipeline_pcap_sink(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *file_name, const char *n_pkts_to_dump)
+{
+ const char *next = NULL;
+ char *end;
+ uint32_t i;
+ int parse_file = 0;
+
+ if (file_name && !n_pkts_to_dump) {
+ next = file_name;
+ parse_file = 1; /* parse file path */
+ } else if (n_pkts_to_dump && !file_name) {
+ next = n_pkts_to_dump;
+		parse_file = 0; /* parse number of packets to dump */
+ } else
+ return -EINVAL;
+
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ if (p->n_pktq_out == 0)
+ return -EINVAL;
+
+ i = 0;
+ while (*next != '\0') {
+ uint32_t id;
+
+ if (i >= p->n_pktq_out)
+ return -EINVAL;
+
+ id = p->pktq_out[i].id;
+
+ end = strchr(next, ' ');
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+		if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (parse_file) {
+ app->sink_params[id].file_name = strdup(name);
+ if (app->sink_params[id].file_name == NULL)
+ return -ENOMEM;
+ } else {
+ if (parser_read_uint32(
+ &app->sink_params[id].n_pkts_to_dump,
+ name) != 0) {
+				if (app->sink_params[id].file_name != NULL)
+					free(app->sink_params[id].file_name);
+ return -EINVAL;
+ }
+ }
+
+ i++;
+
+ if (i == p->n_pktq_out)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+parse_pipeline_pktq_in(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ while (*next != '\0') {
+ enum app_pktq_in_type type;
+ int id;
+ char *end_space;
+ char *end_tab;
+
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+		end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+		if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (validate_name(name, "RXQ", 2) == 0) {
+ type = APP_PKTQ_IN_HWQ;
+ id = APP_PARAM_ADD(app->hwq_in_params, name);
+ } else if (validate_name(name, "SWQ", 1) == 0) {
+ type = APP_PKTQ_IN_SWQ;
+ id = APP_PARAM_ADD(app->swq_params, name);
+ } else if (validate_name(name, "TM", 1) == 0) {
+ type = APP_PKTQ_IN_TM;
+ id = APP_PARAM_ADD(app->tm_params, name);
+ } else if (validate_name(name, "SOURCE", 1) == 0) {
+ type = APP_PKTQ_IN_SOURCE;
+ id = APP_PARAM_ADD(app->source_params, name);
+ } else
+ return -EINVAL;
+
+ if (id < 0)
+ return id;
+
+ p->pktq_in[p->n_pktq_in].type = type;
+ p->pktq_in[p->n_pktq_in].id = (uint32_t) id;
+ p->n_pktq_in++;
+ }
+
+ return 0;
+}
+
+static int
+parse_pipeline_pktq_out(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ while (*next != '\0') {
+ enum app_pktq_out_type type;
+ int id;
+ char *end_space;
+ char *end_tab;
+
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+		end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+		if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+ if (validate_name(name, "TXQ", 2) == 0) {
+ type = APP_PKTQ_OUT_HWQ;
+ id = APP_PARAM_ADD(app->hwq_out_params, name);
+ } else if (validate_name(name, "SWQ", 1) == 0) {
+ type = APP_PKTQ_OUT_SWQ;
+ id = APP_PARAM_ADD(app->swq_params, name);
+ } else if (validate_name(name, "TM", 1) == 0) {
+ type = APP_PKTQ_OUT_TM;
+ id = APP_PARAM_ADD(app->tm_params, name);
+ } else if (validate_name(name, "SINK", 1) == 0) {
+ type = APP_PKTQ_OUT_SINK;
+ id = APP_PARAM_ADD(app->sink_params, name);
+ } else
+ return -EINVAL;
+
+ if (id < 0)
+ return id;
+
+ p->pktq_out[p->n_pktq_out].type = type;
+ p->pktq_out[p->n_pktq_out].id = id;
+ p->n_pktq_out++;
+ }
+
+ return 0;
+}
+
+static int
+parse_pipeline_msgq_in(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+ ssize_t idx;
+
+ while (*next != '\0') {
+ char *end_space;
+ char *end_tab;
+
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+		end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+		if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (validate_name(name, "MSGQ", 1) != 0)
+ return -EINVAL;
+
+ idx = APP_PARAM_ADD(app->msgq_params, name);
+ if (idx < 0)
+ return idx;
+
+ p->msgq_in[p->n_msgq_in] = idx;
+ p->n_msgq_in++;
+ }
+
+ return 0;
+}
+
+static int
+parse_pipeline_msgq_out(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+ ssize_t idx;
+
+ while (*next != '\0') {
+ char *end_space;
+ char *end_tab;
+
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+		end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+		if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (validate_name(name, "MSGQ", 1) != 0)
+ return -EINVAL;
+
+ idx = APP_PARAM_ADD(app->msgq_params, name);
+ if (idx < 0)
+ return idx;
+
+ p->msgq_out[p->n_msgq_out] = idx;
+ p->n_msgq_out++;
+ }
+
+ return 0;
+}
+
+static void
+parse_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ char name[CFG_NAME_LEN];
+ struct app_pipeline_params *param;
+ struct rte_cfgfile_entry *entries;
+ ssize_t param_idx;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->pipeline_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->pipeline_params, section_name);
+
+ param = &app->pipeline_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "type") == 0) {
+ int w_size = snprintf(param->type, RTE_DIM(param->type),
+ "%s", ent->value);
+
+ PARSE_ERROR(((w_size > 0) &&
+ (w_size < (int)RTE_DIM(param->type))),
+ section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "core") == 0) {
+ int status = parse_pipeline_core(
+ &param->socket_id, &param->core_id,
+ &param->hyper_th_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pktq_in") == 0) {
+ int status = parse_pipeline_pktq_in(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pktq_out") == 0) {
+ int status = parse_pipeline_pktq_out(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "msgq_in") == 0) {
+ int status = parse_pipeline_msgq_in(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "msgq_out") == 0) {
+ int status = parse_pipeline_msgq_out(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "timer_period") == 0) {
+ int status = parser_read_uint32(
+ &param->timer_period,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_file_rd") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_source(app,
+ param, ent->value, NULL);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_source(app,
+ param, NULL, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_file_wr") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_sink(app, param,
+ ent->value, NULL);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_sink(app, param,
+ NULL, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* pipeline type specific items */
+ APP_CHECK((param->n_args < APP_MAX_PIPELINE_ARGS),
+ "Parse error in section \"%s\": too many "
+ "pipeline specified parameters", section_name);
+
+ param->args_name[param->n_args] = strdup(ent->name);
+ param->args_value[param->n_args] = strdup(ent->value);
+
+ APP_CHECK((param->args_name[param->n_args] != NULL) &&
+ (param->args_value[param->n_args] != NULL),
+ "Parse error: no free memory");
+
+ param->n_args++;
+ }
+
+ param->parsed = 1;
+
+ snprintf(name, sizeof(name), "MSGQ-REQ-%s", section_name);
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+ param->msgq_in[param->n_msgq_in++] = param_idx;
+
+ snprintf(name, sizeof(name), "MSGQ-RSP-%s", section_name);
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+ param->msgq_out[param->n_msgq_out++] = param_idx;
+
+ snprintf(name, sizeof(name), "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ param->socket_id,
+ param->core_id,
+ (param->hyper_th_id) ? "h" : "");
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+
+ snprintf(name, sizeof(name), "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ param->socket_id,
+ param->core_id,
+ (param->hyper_th_id) ? "h" : "");
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+
+ free(entries);
+}
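+
+/*
+ * Example [PIPELINE1] section (editorial illustration, values are
+ * hypothetical):
+ *	type = PASS-THROUGH
+ *	core = s0c1
+ *	pktq_in = RXQ0.0 RXQ1.0
+ *	pktq_out = TXQ0.0 TXQ1.0
+ * Entries not handled above are stored as pipeline type specific arguments.
+ */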
+
+static void
+parse_mempool(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_mempool_params *param;
+ struct rte_cfgfile_entry *entries;
+ ssize_t param_idx;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->mempool_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->mempool_params, section_name);
+
+ param = &app->mempool_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "buffer_size") == 0) {
+ int status = parser_read_uint32(
+ &param->buffer_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pool_size") == 0) {
+ int status = parser_read_uint32(
+ &param->pool_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cache_size") == 0) {
+ int status = parser_read_uint32(
+ &param->cache_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_link(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_link_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ int pci_bdf_present = 0;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->link_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->link_params, section_name);
+
+ param = &app->link_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "promisc") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->promisc = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "arp_q") == 0) {
+ int status = parser_read_uint32(&param->arp_q,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "tcp_syn_q") == 0) {
+ int status = parser_read_uint32(
+ &param->tcp_syn_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name, ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "ip_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->ip_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "tcp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->tcp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "udp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->udp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "sctp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->sctp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pci_bdf") == 0) {
+ PARSE_ERROR_DUPLICATE((pci_bdf_present == 0),
+ section_name, ent->name);
+
+ snprintf(param->pci_bdf, APP_LINK_PCI_BDF_SIZE,
+ "%s", ent->value);
+ pci_bdf_present = 1;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ /* Check for mandatory fields */
+ if (app->port_mask)
+ PARSE_ERROR_MESSAGE((pci_bdf_present == 0),
+ section_name, "pci_bdf",
+ "entry not allowed (port_mask is provided)");
+ else
+ PARSE_ERROR_MESSAGE((pci_bdf_present),
+ section_name, "pci_bdf",
+ "this entry is mandatory (port_mask is not "
+ "provided)");
+
+ param->parsed = 1;
+
+ free(entries);
+}
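+
+/*
+ * Example [LINK0] section (editorial illustration, values are hypothetical):
+ *	promisc = yes
+ *	arp_q = 1
+ *	tcp_syn_q = 2
+ *	pci_bdf = 0000:01:00.0   ; only allowed when -p PORT_MASK is not given
+ */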
+
+static void
+parse_rxq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_hwq_in_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->hwq_in_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_in_params, section_name);
+
+ param = &app->hwq_in_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
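+
+/*
+ * Example [RXQ0.0] section (editorial illustration, values are hypothetical):
+ *	mempool = MEMPOOL0
+ *	size = 128
+ *	burst = 32
+ * The section name encodes the link and queue ids (RXQ<link>.<queue>).
+ */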
+
+static void
+parse_txq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_hwq_out_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->hwq_out_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_out_params, section_name);
+
+ param = &app->hwq_out_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_swq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_swq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ uint32_t mtu_present = 0;
+ uint32_t metadata_size_present = 0;
+ uint32_t mempool_direct_present = 0;
+ uint32_t mempool_indirect_present = 0;
+
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->swq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->swq_params, section_name);
+
+ param = &app->swq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+			int status = parser_read_uint32(
+				&param->burst_read, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_write, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name, ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv4_frag") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+
+ param->ipv4_frag = status;
+ if (param->mtu == 0)
+ param->mtu = 1500;
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv6_frag") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv6_frag = status;
+ if (param->mtu == 0)
+ param->mtu = 1320;
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv4_ras") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv4_ras = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv6_ras") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv6_ras = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mtu") == 0) {
+ int status = parser_read_uint32(&param->mtu,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ mtu_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "metadata_size") == 0) {
+ int status = parser_read_uint32(
+ &param->metadata_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ metadata_size_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool_direct") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_direct_id = idx;
+ mempool_direct_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool_indirect") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_indirect_id = idx;
+ mempool_indirect_present = 1;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+	APP_CHECK(((mtu_present == 0) ||
+		(param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+		"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+		"is off, therefore entry \"mtu\" is not allowed",
+		section_name);
+
+	APP_CHECK(((metadata_size_present == 0) ||
+		(param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+		"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+		"is off, therefore entry \"metadata_size\" is "
+		"not allowed", section_name);
+
+	APP_CHECK(((mempool_direct_present == 0) ||
+		(param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+		"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+		"is off, therefore entry \"mempool_direct\" is "
+		"not allowed", section_name);
+
+	APP_CHECK(((mempool_indirect_present == 0) ||
+		(param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+		"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+		"is off, therefore entry \"mempool_indirect\" is "
+		"not allowed", section_name);
+
+ param->parsed = 1;
+
+ free(entries);
+}
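+
+/*
+ * Example [SWQ0] section with IPv4 fragmentation enabled (editorial
+ * illustration, values are hypothetical):
+ *	ipv4_frag = yes
+ *	mtu = 1500
+ *	mempool_direct = MEMPOOL0
+ *	mempool_indirect = MEMPOOL1
+ * The mtu, metadata_size and mempool entries are only accepted when
+ * IPv4/IPv6 fragmentation is enabled.
+ */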
+
+static void
+parse_tm(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_tm_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->tm_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->tm_params, section_name);
+
+ param = &app->tm_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "cfg") == 0) {
+ param->file_name = strdup(ent->value);
+ PARSE_ERROR_MALLOC(param->file_name != NULL);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_read, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_write, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_source(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_source_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+ uint32_t pcap_file_present = 0;
+ uint32_t pcap_size_present = 0;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->source_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->source_params, section_name);
+
+ param = &app->source_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+		if (strcmp(ent->name, "pcap_file_rd") == 0) {
+ PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
+ section_name, ent->name);
+
+ param->file_name = strdup(ent->value);
+
+ PARSE_ERROR_MALLOC(param->file_name != NULL);
+ pcap_file_present = 1;
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((pcap_size_present == 0),
+ section_name, ent->name);
+
+ status = parser_read_uint32(
+ &param->n_bytes_per_pkt, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ pcap_size_present = 1;
+
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_sink(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_sink_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+ uint32_t pcap_file_present = 0;
+ uint32_t pcap_n_pkt_present = 0;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->sink_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->sink_params, section_name);
+
+ param = &app->sink_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+		if (strcmp(ent->name, "pcap_file_wr") == 0) {
+			PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
+				section_name, ent->name);
+
+			param->file_name = strdup(ent->value);
+
+			PARSE_ERROR_MALLOC((param->file_name != NULL));
+			pcap_file_present = 1;
+
+			continue;
+		}
+
+		if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
+			int status;
+
+			PARSE_ERROR_DUPLICATE((pcap_n_pkt_present == 0),
+				section_name, ent->name);
+
+			status = parser_read_uint32(
+				&param->n_pkts_to_dump, ent->value);
+
+			PARSE_ERROR((status == 0), section_name,
+				ent->name);
+			pcap_n_pkt_present = 1;
+
+			continue;
+		}
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_msgq_req_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
+
+ param = &app->msgq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+ free(entries);
+}
+
+static void
+parse_msgq_rsp_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
+
+ param = &app->msgq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_msgq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
+
+ param = &app->msgq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+typedef void (*config_section_load)(struct app_params *p,
+ const char *section_name,
+ struct rte_cfgfile *cfg);
+
+struct config_section {
+ const char prefix[CFG_NAME_LEN];
+ int numbers;
+ config_section_load load;
+};
+
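+/*
+ * Config file section scheme: each section name is a prefix optionally
+ * followed by "numbers" dot-separated indices, e.g. "EAL" (no index),
+ * "MEMPOOL0" (one index) or "RXQ1.0" (two indices).
+ */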
+static const struct config_section cfg_file_scheme[] = {
+ {"EAL", 0, parse_eal},
+ {"PIPELINE", 1, parse_pipeline},
+ {"MEMPOOL", 1, parse_mempool},
+ {"LINK", 1, parse_link},
+ {"RXQ", 2, parse_rxq},
+ {"TXQ", 2, parse_txq},
+ {"SWQ", 1, parse_swq},
+ {"TM", 1, parse_tm},
+ {"SOURCE", 1, parse_source},
+ {"SINK", 1, parse_sink},
+ {"MSGQ-REQ-PIPELINE", 1, parse_msgq_req_pipeline},
+ {"MSGQ-RSP-PIPELINE", 1, parse_msgq_rsp_pipeline},
+ {"MSGQ", 1, parse_msgq},
+};
+
+static void
+create_implicit_mempools(struct app_params *app)
+{
+ ssize_t idx;
+
+ idx = APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params, "start-up");
+}
+
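+/*
+ * Each bit set in the port mask creates one implicit LINK section; link IDs
+ * are assigned in increasing order of the selected PMD port IDs.
+ */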
+static void
+create_implicit_links_from_port_mask(struct app_params *app,
+ uint64_t port_mask)
+{
+ uint32_t pmd_id, link_id;
+
+ link_id = 0;
+ for (pmd_id = 0; pmd_id < RTE_MAX_ETHPORTS; pmd_id++) {
+ char name[APP_PARAM_NAME_SIZE];
+ ssize_t idx;
+
+ if ((port_mask & (1LLU << pmd_id)) == 0)
+ continue;
+
+ snprintf(name, sizeof(name), "LINK%" PRIu32, link_id);
+ idx = APP_PARAM_ADD(app->link_params, name);
+ PARSER_PARAM_ADD_CHECK(idx, app->link_params, name);
+
+ app->link_params[idx].pmd_id = pmd_id;
+ link_id++;
+ }
+}
+
+static void
+assign_link_pmd_id_from_pci_bdf(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+
+ link->pmd_id = i;
+ }
+}
+
+int
+app_config_parse(struct app_params *app, const char *file_name)
+{
+ struct rte_cfgfile *cfg;
+ char **section_names;
+ int i, j, sect_count;
+
+ /* Implicit mempools */
+ create_implicit_mempools(app);
+
+ /* Port mask */
+ if (app->port_mask)
+ create_implicit_links_from_port_mask(app, app->port_mask);
+
+ /* Load application configuration file */
+ cfg = rte_cfgfile_load(file_name, 0);
+ APP_CHECK((cfg != NULL), "Parse error: Unable to load config "
+ "file %s", file_name);
+
+ sect_count = rte_cfgfile_num_sections(cfg, NULL, 0);
+	APP_CHECK((sect_count > 0), "Parse error: number of sections "
+		"in file \"%s\" is %d", file_name,
+		sect_count);
+
+ section_names = malloc(sect_count * sizeof(char *));
+ PARSE_ERROR_MALLOC(section_names != NULL);
+
+ for (i = 0; i < sect_count; i++)
+ section_names[i] = malloc(CFG_NAME_LEN);
+
+ rte_cfgfile_sections(cfg, section_names, sect_count);
+
+ for (i = 0; i < sect_count; i++) {
+ const struct config_section *sch_s;
+ int len, cfg_name_len;
+
+ cfg_name_len = strlen(section_names[i]);
+
+ /* Find section type */
+ for (j = 0; j < (int)RTE_DIM(cfg_file_scheme); j++) {
+ sch_s = &cfg_file_scheme[j];
+ len = strlen(sch_s->prefix);
+
+ if (cfg_name_len < len)
+ continue;
+
+			/* After the section name we expect only '\0', a digit
+			 * or digit dot digit, so protect against false
+			 * matches: for example, "ABC" should match section
+			 * name "ABC0.0", but it should not match section name
+			 * "ABCDEF".
+			 */
+ if ((section_names[i][len] != '\0') &&
+ !isdigit(section_names[i][len]))
+ continue;
+
+ if (strncmp(sch_s->prefix, section_names[i], len) == 0)
+ break;
+ }
+
+ APP_CHECK(j < (int)RTE_DIM(cfg_file_scheme),
+ "Parse error: unknown section %s",
+ section_names[i]);
+
+ APP_CHECK(validate_name(section_names[i],
+ sch_s->prefix,
+ sch_s->numbers) == 0,
+ "Parse error: invalid section name \"%s\"",
+ section_names[i]);
+
+ sch_s->load(app, section_names[i], cfg);
+ }
+
+ for (i = 0; i < sect_count; i++)
+ free(section_names[i]);
+
+ free(section_names);
+
+ rte_cfgfile_close(cfg);
+
+ APP_PARAM_COUNT(app->mempool_params, app->n_mempools);
+ APP_PARAM_COUNT(app->link_params, app->n_links);
+ APP_PARAM_COUNT(app->hwq_in_params, app->n_pktq_hwq_in);
+ APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out);
+ APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq);
+ APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm);
+ APP_PARAM_COUNT(app->source_params, app->n_pktq_source);
+ APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink);
+ APP_PARAM_COUNT(app->msgq_params, app->n_msgq);
+ APP_PARAM_COUNT(app->pipeline_params, app->n_pipelines);
+
+#ifdef RTE_PORT_PCAP
+ for (i = 0; i < (int)app->n_pktq_source; i++) {
+ struct app_pktq_source_params *p = &app->source_params[i];
+
+ APP_CHECK((p->file_name), "Parse error: missing "
+ "mandatory field \"pcap_file_rd\" for \"%s\"",
+ p->name);
+ }
+#else
+ for (i = 0; i < (int)app->n_pktq_source; i++) {
+ struct app_pktq_source_params *p = &app->source_params[i];
+
+ APP_CHECK((!p->file_name), "Parse error: invalid field "
+ "\"pcap_file_rd\" for \"%s\"", p->name);
+ }
+#endif
+
+ if (app->port_mask == 0)
+ assign_link_pmd_id_from_pci_bdf(app);
+
+ /* Save configuration to output file */
+ app_config_save(app, app->output_file);
+
+ /* Load TM configuration files */
+ app_config_parse_tm(app);
+
+ return 0;
+}
+
+static void
+save_eal_params(struct app_params *app, FILE *f)
+{
+ struct app_eal_params *p = &app->eal_params;
+ uint32_t i;
+
+ fprintf(f, "[EAL]\n");
+
+ if (p->coremap)
+ fprintf(f, "%s = %s\n", "lcores", p->coremap);
+
+ if (p->master_lcore_present)
+ fprintf(f, "%s = %" PRIu32 "\n",
+ "master_lcore", p->master_lcore);
+
+ fprintf(f, "%s = %" PRIu32 "\n", "n", p->channels);
+
+ if (p->memory_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "m", p->memory);
+
+ if (p->ranks_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "r", p->ranks);
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "pci_blacklist",
+ p->pci_blacklist[i]);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "pci_whitelist",
+ p->pci_whitelist[i]);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "vdev",
+ p->vdev[i]);
+ }
+
+ if (p->vmware_tsc_map_present)
+ fprintf(f, "%s = %s\n", "vmware_tsc_map",
+ (p->vmware_tsc_map) ? "yes" : "no");
+
+ if (p->proc_type)
+ fprintf(f, "%s = %s\n", "proc_type", p->proc_type);
+
+ if (p->syslog)
+ fprintf(f, "%s = %s\n", "syslog", p->syslog);
+
+ if (p->log_level_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "log_level", p->log_level);
+
+ if (p->version_present)
+ fprintf(f, "%s = %s\n", "v", (p->version) ? "yes" : "no");
+
+ if (p->help_present)
+ fprintf(f, "%s = %s\n", "help", (p->help) ? "yes" : "no");
+
+ if (p->no_huge_present)
+ fprintf(f, "%s = %s\n", "no_huge", (p->no_huge) ? "yes" : "no");
+
+ if (p->no_pci_present)
+ fprintf(f, "%s = %s\n", "no_pci", (p->no_pci) ? "yes" : "no");
+
+ if (p->no_hpet_present)
+ fprintf(f, "%s = %s\n", "no_hpet", (p->no_hpet) ? "yes" : "no");
+
+ if (p->no_shconf_present)
+ fprintf(f, "%s = %s\n", "no_shconf",
+ (p->no_shconf) ? "yes" : "no");
+
+ if (p->add_driver)
+ fprintf(f, "%s = %s\n", "d", p->add_driver);
+
+ if (p->socket_mem)
+ fprintf(f, "%s = %s\n", "socket_mem", p->socket_mem);
+
+ if (p->huge_dir)
+ fprintf(f, "%s = %s\n", "huge_dir", p->huge_dir);
+
+ if (p->file_prefix)
+ fprintf(f, "%s = %s\n", "file_prefix", p->file_prefix);
+
+ if (p->base_virtaddr)
+ fprintf(f, "%s = %s\n", "base_virtaddr", p->base_virtaddr);
+
+ if (p->create_uio_dev_present)
+ fprintf(f, "%s = %s\n", "create_uio_dev",
+ (p->create_uio_dev) ? "yes" : "no");
+
+ if (p->vfio_intr)
+ fprintf(f, "%s = %s\n", "vfio_intr", p->vfio_intr);
+
+ if (p->xen_dom0_present)
+ fprintf(f, "%s = %s\n", "xen_dom0",
+ (p->xen_dom0) ? "yes" : "no");
+
+ fputc('\n', f);
+}
+
+static void
+save_mempool_params(struct app_params *app, FILE *f)
+{
+ struct app_mempool_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->mempool_params);
+ for (i = 0; i < count; i++) {
+ p = &app->mempool_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "buffer_size", p->buffer_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "pool_size", p->pool_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cache_size", p->cache_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_links_params(struct app_params *app, FILE *f)
+{
+ struct app_link_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->link_params);
+ for (i = 0; i < count; i++) {
+ p = &app->link_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "; %s = %" PRIu32 "\n", "pmd_id", p->pmd_id);
+ fprintf(f, "%s = %s\n", "promisc", p->promisc ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu32 "\n", "arp_q", p->arp_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "tcp_syn_q",
+ p->tcp_syn_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "ip_local_q", p->ip_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "tcp_local_q", p->tcp_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "udp_local_q", p->udp_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "sctp_local_q",
+ p->sctp_local_q);
+
+ if (strlen(p->pci_bdf))
+ fprintf(f, "%s = %s\n", "pci_bdf", p->pci_bdf);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_rxq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_hwq_in_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->hwq_in_params);
+ for (i = 0; i < count; i++) {
+ p = &app->hwq_in_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n",
+ "mempool",
+ app->mempool_params[p->mempool_id].name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_txq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_hwq_out_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->hwq_out_params);
+ for (i = 0; i < count; i++) {
+ p = &app->hwq_out_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+ fprintf(f, "%s = %s\n",
+ "dropless",
+ p->dropless ? "yes" : "no");
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_swq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_swq_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->swq_params);
+ for (i = 0; i < count; i++) {
+ p = &app->swq_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+ fprintf(f, "%s = %s\n", "dropless", p->dropless ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+ fprintf(f, "%s = %s\n", "ipv4_frag", p->ipv4_frag ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv6_frag", p->ipv6_frag ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv4_ras", p->ipv4_ras ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv6_ras", p->ipv6_ras ? "yes" : "no");
+ if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1)) {
+ fprintf(f, "%s = %" PRIu32 "\n", "mtu", p->mtu);
+ fprintf(f, "%s = %" PRIu32 "\n", "metadata_size", p->metadata_size);
+ fprintf(f, "%s = %s\n",
+ "mempool_direct",
+ app->mempool_params[p->mempool_direct_id].name);
+ fprintf(f, "%s = %s\n",
+ "mempool_indirect",
+ app->mempool_params[p->mempool_indirect_id].name);
+ }
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_tm_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_tm_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->tm_params);
+ for (i = 0; i < count; i++) {
+ p = &app->tm_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n", "cfg", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_source_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_source_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->source_params);
+ for (i = 0; i < count; i++) {
+ p = &app->source_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n",
+ "mempool",
+ app->mempool_params[p->mempool_id].name);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+ fprintf(f, "%s = %s\n", "pcap_file_rd", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n", "pcap_bytes_rd_per_pkt",
+ p->n_bytes_per_pkt);
+ fputc('\n', f);
+ }
+}
+
+static void
+save_sink_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_sink_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->sink_params);
+ for (i = 0; i < count; i++) {
+ p = &app->sink_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n", "pcap_file_wr", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n",
+ "pcap_n_pkt_wr", p->n_pkts_to_dump);
+ fputc('\n', f);
+ }
+}
+
+static void
+save_msgq_params(struct app_params *app, FILE *f)
+{
+ struct app_msgq_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->msgq_params);
+ for (i = 0; i < count; i++) {
+ p = &app->msgq_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_pipeline_params(struct app_params *app, FILE *f)
+{
+ size_t i, count;
+
+ count = RTE_DIM(app->pipeline_params);
+ for (i = 0; i < count; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ /* section name */
+ fprintf(f, "[%s]\n", p->name);
+
+ /* type */
+ fprintf(f, "type = %s\n", p->type);
+
+ /* core */
+ fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
+ p->socket_id,
+ p->core_id,
+ (p->hyper_th_id) ? "h" : "");
+
+ /* pktq_in */
+ if (p->n_pktq_in) {
+ uint32_t j;
+
+ fprintf(f, "pktq_in =");
+ for (j = 0; j < p->n_pktq_in; j++) {
+ struct app_pktq_in_params *pp = &p->pktq_in[j];
+ char *name;
+
+ switch (pp->type) {
+ case APP_PKTQ_IN_HWQ:
+ name = app->hwq_in_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_SWQ:
+ name = app->swq_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_TM:
+ name = app->tm_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_SOURCE:
+ name = app->source_params[pp->id].name;
+ break;
+ default:
+ APP_CHECK(0, "System error "
+ "occurred while saving "
+ "parameter to file");
+ }
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+		/* pktq_out */
+ if (p->n_pktq_out) {
+ uint32_t j;
+
+ fprintf(f, "pktq_out =");
+ for (j = 0; j < p->n_pktq_out; j++) {
+ struct app_pktq_out_params *pp =
+ &p->pktq_out[j];
+ char *name;
+
+ switch (pp->type) {
+ case APP_PKTQ_OUT_HWQ:
+ name = app->hwq_out_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_SWQ:
+ name = app->swq_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_TM:
+ name = app->tm_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_SINK:
+ name = app->sink_params[pp->id].name;
+ break;
+ default:
+ APP_CHECK(0, "System error "
+ "occurred while saving "
+ "parameter to file");
+ }
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* msgq_in */
+ if (p->n_msgq_in) {
+ uint32_t j;
+
+ fprintf(f, "msgq_in =");
+ for (j = 0; j < p->n_msgq_in; j++) {
+ uint32_t id = p->msgq_in[j];
+ char *name = app->msgq_params[id].name;
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* msgq_out */
+ if (p->n_msgq_out) {
+ uint32_t j;
+
+ fprintf(f, "msgq_out =");
+ for (j = 0; j < p->n_msgq_out; j++) {
+ uint32_t id = p->msgq_out[j];
+ char *name = app->msgq_params[id].name;
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* timer_period */
+ fprintf(f, "timer_period = %" PRIu32 "\n", p->timer_period);
+
+ /* args */
+ if (p->n_args) {
+ uint32_t j;
+
+ for (j = 0; j < p->n_args; j++)
+ fprintf(f, "%s = %s\n", p->args_name[j],
+ p->args_value[j]);
+ }
+
+ fprintf(f, "\n");
+ }
+}
+
+void
+app_config_save(struct app_params *app, const char *file_name)
+{
+ FILE *file;
+ char *name, *dir_name;
+ int status;
+
+ name = strdup(file_name);
+ dir_name = dirname(name);
+ status = access(dir_name, W_OK);
+ APP_CHECK((status == 0),
+ "Error: need write access privilege to directory "
+ "\"%s\" to save configuration\n", dir_name);
+
+ file = fopen(file_name, "w");
+ APP_CHECK((file != NULL),
+ "Error: failed to save configuration to file \"%s\"",
+ file_name);
+
+ save_eal_params(app, file);
+ save_pipeline_params(app, file);
+ save_mempool_params(app, file);
+ save_links_params(app, file);
+ save_rxq_params(app, file);
+ save_txq_params(app, file);
+ save_swq_params(app, file);
+ save_tm_params(app, file);
+ save_source_params(app, file);
+ save_sink_params(app, file);
+ save_msgq_params(app, file);
+
+ fclose(file);
+ free(name);
+}
+
+int
+app_config_init(struct app_params *app)
+{
+ size_t i;
+
+ memcpy(app, &app_params_default, sizeof(struct app_params));
+
+ for (i = 0; i < RTE_DIM(app->mempool_params); i++)
+ memcpy(&app->mempool_params[i],
+ &mempool_params_default,
+ sizeof(struct app_mempool_params));
+
+ for (i = 0; i < RTE_DIM(app->link_params); i++)
+ memcpy(&app->link_params[i],
+ &link_params_default,
+ sizeof(struct app_link_params));
+
+ for (i = 0; i < RTE_DIM(app->hwq_in_params); i++)
+ memcpy(&app->hwq_in_params[i],
+ &default_hwq_in_params,
+ sizeof(default_hwq_in_params));
+
+ for (i = 0; i < RTE_DIM(app->hwq_out_params); i++)
+ memcpy(&app->hwq_out_params[i],
+ &default_hwq_out_params,
+ sizeof(default_hwq_out_params));
+
+ for (i = 0; i < RTE_DIM(app->swq_params); i++)
+ memcpy(&app->swq_params[i],
+ &default_swq_params,
+ sizeof(default_swq_params));
+
+ for (i = 0; i < RTE_DIM(app->tm_params); i++)
+ memcpy(&app->tm_params[i],
+ &default_tm_params,
+ sizeof(default_tm_params));
+
+ for (i = 0; i < RTE_DIM(app->source_params); i++)
+ memcpy(&app->source_params[i],
+ &default_source_params,
+ sizeof(default_source_params));
+
+ for (i = 0; i < RTE_DIM(app->sink_params); i++)
+ memcpy(&app->sink_params[i],
+ &default_sink_params,
+ sizeof(default_sink_params));
+
+ for (i = 0; i < RTE_DIM(app->msgq_params); i++)
+ memcpy(&app->msgq_params[i],
+ &default_msgq_params,
+ sizeof(default_msgq_params));
+
+ for (i = 0; i < RTE_DIM(app->pipeline_params); i++)
+ memcpy(&app->pipeline_params[i],
+ &default_pipeline_params,
+ sizeof(default_pipeline_params));
+
+ return 0;
+}
+
+static char *
+filenamedup(const char *filename, const char *suffix)
+{
+ char *s = malloc(strlen(filename) + strlen(suffix) + 1);
+
+ if (!s)
+ return NULL;
+
+ sprintf(s, "%s%s", filename, suffix);
+ return s;
+}
+
+int
+app_config_args(struct app_params *app, int argc, char **argv)
+{
+ const char *optname;
+ int opt, option_index;
+ int f_present, s_present, p_present, l_present;
+ int preproc_present, preproc_params_present;
+	int scanned = 0;
+
+ static struct option lgopts[] = {
+ { "preproc", 1, 0, 0 },
+ { "preproc-args", 1, 0, 0 },
+ { NULL, 0, 0, 0 }
+ };
+
+ /* Copy application name */
+ strncpy(app->app_name, argv[0], APP_APPNAME_SIZE - 1);
+
+ f_present = 0;
+ s_present = 0;
+ p_present = 0;
+ l_present = 0;
+ preproc_present = 0;
+ preproc_params_present = 0;
+
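+	/*
+	 * Recognized command line options:
+	 *   -f <file>       configuration file
+	 *   -s <file>       script file
+	 *   -p <hex mask>   port mask
+	 *   -l <level>      log level
+	 *   --preproc <cmd> / --preproc-args <args>   config file preprocessor
+	 */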
+ while ((opt = getopt_long(argc, argv, "f:s:p:l:", lgopts,
+ &option_index)) != EOF)
+ switch (opt) {
+ case 'f':
+ if (f_present)
+ rte_panic("Error: Config file is provided "
+ "more than once\n");
+ f_present = 1;
+
+ if (!strlen(optarg))
+ rte_panic("Error: Config file name is null\n");
+
+ app->config_file = strdup(optarg);
+ if (app->config_file == NULL)
+ rte_panic("Error: Memory allocation failure\n");
+
+ break;
+
+ case 's':
+ if (s_present)
+ rte_panic("Error: Script file is provided "
+ "more than once\n");
+ s_present = 1;
+
+ if (!strlen(optarg))
+ rte_panic("Error: Script file name is null\n");
+
+ app->script_file = strdup(optarg);
+ if (app->script_file == NULL)
+ rte_panic("Error: Memory allocation failure\n");
+
+ break;
+
+ case 'p':
+ if (p_present)
+ rte_panic("Error: PORT_MASK is provided "
+ "more than once\n");
+ p_present = 1;
+
+ if ((sscanf(optarg, "%" SCNx64 "%n", &app->port_mask,
+				&scanned) != 1) ||
+				((size_t) scanned != strlen(optarg)))
+ rte_panic("Error: PORT_MASK is not "
+ "a hexadecimal integer\n");
+
+ if (app->port_mask == 0)
+ rte_panic("Error: PORT_MASK is null\n");
+
+ break;
+
+ case 'l':
+ if (l_present)
+ rte_panic("Error: LOG_LEVEL is provided "
+ "more than once\n");
+ l_present = 1;
+
+ if ((sscanf(optarg, "%" SCNu32 "%n", &app->log_level,
+				&scanned) != 1) ||
+				((size_t) scanned != strlen(optarg)) ||
+ (app->log_level >= APP_LOG_LEVELS))
+ rte_panic("Error: LOG_LEVEL invalid value\n");
+
+ break;
+
+ case 0:
+ optname = lgopts[option_index].name;
+
+ if (strcmp(optname, "preproc") == 0) {
+ if (preproc_present)
+ rte_panic("Error: Preprocessor argument "
+ "is provided more than once\n");
+ preproc_present = 1;
+
+ app->preproc = strdup(optarg);
+ break;
+ }
+
+ if (strcmp(optname, "preproc-args") == 0) {
+ if (preproc_params_present)
+ rte_panic("Error: Preprocessor args "
+ "are provided more than once\n");
+ preproc_params_present = 1;
+
+ app->preproc_args = strdup(optarg);
+ break;
+ }
+
+ app_print_usage(argv[0]);
+ break;
+
+ default:
+ app_print_usage(argv[0]);
+ }
+
+ optind = 0; /* reset getopt lib */
+
+ /* Check dependencies between args */
+ if (preproc_params_present && (preproc_present == 0))
+ rte_panic("Error: Preprocessor args specified while "
+ "preprocessor is not defined\n");
+
+ app->parser_file = preproc_present ?
+ filenamedup(app->config_file, ".preproc") :
+ strdup(app->config_file);
+ app->output_file = filenamedup(app->config_file, ".out");
+
+ return 0;
+}
+
+int
+app_config_preproc(struct app_params *app)
+{
+ char buffer[256];
+ int status;
+
+ if (app->preproc == NULL)
+ return 0;
+
+ status = access(app->config_file, F_OK | R_OK);
+ APP_CHECK((status == 0), "Error: Unable to open file %s",
+ app->config_file);
+
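+	/* Run the preprocessor: "<preproc> <preproc_args> <config_file> > <parser_file>" */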
+ snprintf(buffer, sizeof(buffer), "%s %s %s > %s",
+ app->preproc,
+ app->preproc_args ? app->preproc_args : "",
+ app->config_file,
+ app->parser_file);
+
+ status = system(buffer);
+ APP_CHECK((WIFEXITED(status) && (WEXITSTATUS(status) == 0)),
+ "Error occurred while pre-processing file \"%s\"\n",
+ app->config_file);
+
+ return status;
+}
diff --git a/examples/ip_pipeline/config_parse_tm.c b/examples/ip_pipeline/config_parse_tm.c
new file mode 100644
index 00000000..e75eed71
--- /dev/null
+++ b/examples/ip_pipeline/config_parse_tm.c
@@ -0,0 +1,448 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+
+static int
+tm_cfgfile_load_sched_port(
+ struct rte_cfgfile *file,
+ struct rte_sched_port_params *port_params)
+{
+ const char *entry;
+ int j;
+
+ entry = rte_cfgfile_get_entry(file, "port", "frame overhead");
+ if (entry)
+ port_params->frame_overhead = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, "port", "mtu");
+ if (entry)
+ port_params->mtu = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ "port",
+ "number of subports per port");
+ if (entry)
+ port_params->n_subports_per_port = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ "port",
+ "number of pipes per subport");
+ if (entry)
+ port_params->n_pipes_per_subport = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, "port", "queue sizes");
+ if (entry) {
+ char *next;
+
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ port_params->qsize[j] = (uint16_t)
+ strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+#ifdef RTE_SCHED_RED
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ char str[32];
+
+ /* Parse WRED min thresholds */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred min", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].min_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED max thresholds */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred max", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].max_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED inverse mark probabilities */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred inv prob", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].maxp_inv
+ = (uint8_t)strtol(entry, &next, 10);
+
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED EWMA filter weights */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred weight", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].wq_log2
+ = (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ }
+#endif /* RTE_SCHED_RED */
+
+ return 0;
+}
+
+static int
+tm_cfgfile_load_sched_pipe(
+ struct rte_cfgfile *file,
+ struct rte_sched_port_params *port_params,
+ struct rte_sched_pipe_params *pipe_params)
+{
+ int i, j;
+ char *next;
+ const char *entry;
+ int profiles;
+
+ profiles = rte_cfgfile_num_sections(file,
+ "pipe profile", sizeof("pipe profile") - 1);
+ port_params->n_pipe_profiles = profiles;
+
+ for (j = 0; j < profiles; j++) {
+ char pipe_name[32];
+
+ snprintf(pipe_name, sizeof(pipe_name),
+ "pipe profile %" PRId32, j);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tb rate");
+ if (entry)
+ pipe_params[j].tb_rate = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tb size");
+ if (entry)
+ pipe_params[j].tb_size = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc period");
+ if (entry)
+ pipe_params[j].tc_period = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 0 rate");
+ if (entry)
+ pipe_params[j].tc_rate[0] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 rate");
+ if (entry)
+ pipe_params[j].tc_rate[1] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 rate");
+ if (entry)
+ pipe_params[j].tc_rate[2] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 rate");
+ if (entry)
+ pipe_params[j].tc_rate[3] = (uint32_t) atoi(entry);
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ entry = rte_cfgfile_get_entry(file, pipe_name,
+ "tc 3 oversubscription weight");
+ if (entry)
+ pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry);
+#endif
+
+ entry = rte_cfgfile_get_entry(file,
+ pipe_name,
+ "tc 0 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ return 0;
+}
+
+static int
+tm_cfgfile_load_sched_subport(
+ struct rte_cfgfile *file,
+ struct rte_sched_subport_params *subport_params,
+ int *pipe_to_profile)
+{
+ const char *entry;
+ int i, j, k;
+
+ for (i = 0; i < APP_MAX_SCHED_SUBPORTS; i++) {
+ char sec_name[CFG_NAME_LEN];
+
+ snprintf(sec_name, sizeof(sec_name),
+ "subport %" PRId32, i);
+
+ if (rte_cfgfile_has_section(file, sec_name)) {
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tb rate");
+ if (entry)
+ subport_params[i].tb_rate =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tb size");
+ if (entry)
+ subport_params[i].tb_size =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc period");
+ if (entry)
+ subport_params[i].tc_period =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 0 rate");
+ if (entry)
+ subport_params[i].tc_rate[0] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 1 rate");
+ if (entry)
+ subport_params[i].tc_rate[1] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 2 rate");
+ if (entry)
+ subport_params[i].tc_rate[2] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 3 rate");
+ if (entry)
+ subport_params[i].tc_rate[3] =
+ (uint32_t) atoi(entry);
+
+ int n_entries = rte_cfgfile_section_num_entries(file,
+ sec_name);
+ struct rte_cfgfile_entry entries[n_entries];
+
+ rte_cfgfile_section_entries(file,
+ sec_name,
+ entries,
+ n_entries);
+
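+			/*
+			 * Entries of the form "pipe <first>[-<last>] = <profile>"
+			 * map a single pipe or a range of pipes to a pipe profile.
+			 */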
+ for (j = 0; j < n_entries; j++)
+ if (strncmp("pipe",
+ entries[j].name,
+ sizeof("pipe") - 1) == 0) {
+ int profile;
+ char *tokens[2] = {NULL, NULL};
+ int n_tokens;
+ int begin, end;
+ char name[CFG_NAME_LEN + 1];
+
+ profile = atoi(entries[j].value);
+ strncpy(name,
+ entries[j].name,
+ sizeof(name));
+ n_tokens = rte_strsplit(
+ &name[sizeof("pipe")],
+ strnlen(name, CFG_NAME_LEN),
+ tokens, 2, '-');
+
+ begin = atoi(tokens[0]);
+ if (n_tokens == 2)
+ end = atoi(tokens[1]);
+ else
+ end = begin;
+
+ if ((end >= APP_MAX_SCHED_PIPES) ||
+ (begin > end))
+ return -1;
+
+ for (k = begin; k <= end; k++) {
+ char profile_name[CFG_NAME_LEN];
+
+ snprintf(profile_name,
+ sizeof(profile_name),
+ "pipe profile %" PRId32,
+ profile);
+ if (rte_cfgfile_has_section(file, profile_name))
+ pipe_to_profile[i * APP_MAX_SCHED_PIPES + k] = profile;
+ else
+ rte_exit(EXIT_FAILURE,
+ "Wrong pipe profile %s\n",
+ entries[j].value);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+tm_cfgfile_load(struct app_pktq_tm_params *tm)
+{
+ struct rte_cfgfile *file;
+ uint32_t i;
+
+ memset(tm->sched_subport_params, 0, sizeof(tm->sched_subport_params));
+ memset(tm->sched_pipe_profiles, 0, sizeof(tm->sched_pipe_profiles));
+ memset(&tm->sched_port_params, 0, sizeof(tm->sched_port_params));
+ for (i = 0; i < APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES; i++)
+ tm->sched_pipe_to_profile[i] = -1;
+
+ tm->sched_port_params.pipe_profiles = &tm->sched_pipe_profiles[0];
+
+ if (tm->file_name[0] == '\0')
+ return -1;
+
+ file = rte_cfgfile_load(tm->file_name, 0);
+ if (file == NULL)
+ return -1;
+
+ tm_cfgfile_load_sched_port(file,
+ &tm->sched_port_params);
+ tm_cfgfile_load_sched_subport(file,
+ tm->sched_subport_params,
+ tm->sched_pipe_to_profile);
+ tm_cfgfile_load_sched_pipe(file,
+ &tm->sched_port_params,
+ tm->sched_pipe_profiles);
+
+ rte_cfgfile_close(file);
+ return 0;
+}
+
+int
+app_config_parse_tm(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(app->tm_params); i++) {
+ struct app_pktq_tm_params *p = &app->tm_params[i];
+ int status;
+
+ if (!APP_PARAM_VALID(p))
+ break;
+
+ status = tm_cfgfile_load(p);
+ APP_CHECK(status == 0,
+ "Parse error for %s configuration file \"%s\"\n",
+ p->name,
+ p->file_name);
+ }
+
+ return 0;
+}
diff --git a/examples/ip_pipeline/cpu_core_map.c b/examples/ip_pipeline/cpu_core_map.c
new file mode 100644
index 00000000..cb088b1c
--- /dev/null
+++ b/examples/ip_pipeline/cpu_core_map.c
@@ -0,0 +1,492 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+
+#include "cpu_core_map.h"
+
+struct cpu_core_map {
+ uint32_t n_max_sockets;
+ uint32_t n_max_cores_per_socket;
+ uint32_t n_max_ht_per_core;
+ uint32_t n_sockets;
+ uint32_t n_cores_per_socket;
+ uint32_t n_ht_per_core;
+ int map[0];
+};
+
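+/*
+ * The core map is a flattened 3-D array indexed by (socket, core, hyper-thread):
+ * pos = (socket_id * n_max_cores_per_socket + core_id) * n_max_ht_per_core + ht_id
+ */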
+static inline uint32_t
+cpu_core_map_pos(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id)
+{
+ return (socket_id * map->n_max_cores_per_socket + core_id) *
+ map->n_max_ht_per_core + ht_id;
+}
+
+static int
+cpu_core_map_compute_eal(struct cpu_core_map *map);
+
+static int
+cpu_core_map_compute_linux(struct cpu_core_map *map);
+
+static int
+cpu_core_map_compute_and_check(struct cpu_core_map *map);
+
+struct cpu_core_map *
+cpu_core_map_init(uint32_t n_max_sockets,
+ uint32_t n_max_cores_per_socket,
+ uint32_t n_max_ht_per_core,
+ uint32_t eal_initialized)
+{
+ uint32_t map_size, map_mem_size, i;
+ struct cpu_core_map *map;
+ int status;
+
+ /* Check input arguments */
+ if ((n_max_sockets == 0) ||
+ (n_max_cores_per_socket == 0) ||
+ (n_max_ht_per_core == 0))
+ return NULL;
+
+ /* Memory allocation */
+ map_size = n_max_sockets * n_max_cores_per_socket * n_max_ht_per_core;
+ map_mem_size = sizeof(struct cpu_core_map) + map_size * sizeof(int);
+ map = (struct cpu_core_map *) malloc(map_mem_size);
+ if (map == NULL)
+ return NULL;
+
+ /* Initialization */
+ map->n_max_sockets = n_max_sockets;
+ map->n_max_cores_per_socket = n_max_cores_per_socket;
+ map->n_max_ht_per_core = n_max_ht_per_core;
+ map->n_sockets = 0;
+ map->n_cores_per_socket = 0;
+ map->n_ht_per_core = 0;
+
+ for (i = 0; i < map_size; i++)
+ map->map[i] = -1;
+
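+	/*
+	 * Discover the topology either from the already initialized EAL lcore
+	 * table or directly from the Linux sysfs files.
+	 */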
+ status = (eal_initialized) ?
+ cpu_core_map_compute_eal(map) :
+ cpu_core_map_compute_linux(map);
+
+ if (status) {
+ free(map);
+ return NULL;
+ }
+
+ status = cpu_core_map_compute_and_check(map);
+ if (status) {
+ free(map);
+ return NULL;
+ }
+
+ return map;
+}
+
+int
+cpu_core_map_compute_eal(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ /* Compute map */
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t n_detected, core_id_contig;
+ int lcore_id;
+
+ n_detected = 0;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ struct lcore_config *p = &lcore_config[lcore_id];
+
+ if ((p->detected) && (p->socket_id == socket_id))
+ n_detected++;
+ }
+
+ core_id_contig = 0;
+
+ for (core_id = 0; n_detected ; core_id++) {
+ ht_id = 0;
+
+ for (lcore_id = 0;
+ lcore_id < RTE_MAX_LCORE;
+ lcore_id++) {
+ struct lcore_config *p =
+ &lcore_config[lcore_id];
+
+ if ((p->detected) &&
+ (p->socket_id == socket_id) &&
+ (p->core_id == core_id)) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id_contig,
+ ht_id);
+
+ map->map[pos] = lcore_id;
+ ht_id++;
+ n_detected--;
+ }
+ }
+
+ if (ht_id) {
+ core_id_contig++;
+ if (core_id_contig ==
+ map->n_max_cores_per_socket)
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+cpu_core_map_compute_and_check(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ /* Compute n_ht_per_core, n_cores_per_socket, n_sockets */
+ for (ht_id = 0; ht_id < map->n_max_ht_per_core; ht_id++) {
+ if (map->map[ht_id] == -1)
+ break;
+
+ map->n_ht_per_core++;
+ }
+
+ if (map->n_ht_per_core == 0)
+ return -1;
+
+ for (core_id = 0; core_id < map->n_max_cores_per_socket; core_id++) {
+ uint32_t pos = core_id * map->n_max_ht_per_core;
+
+ if (map->map[pos] == -1)
+ break;
+
+ map->n_cores_per_socket++;
+ }
+
+ if (map->n_cores_per_socket == 0)
+ return -1;
+
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t pos = socket_id * map->n_max_cores_per_socket *
+ map->n_max_ht_per_core;
+
+ if (map->map[pos] == -1)
+ break;
+
+ map->n_sockets++;
+ }
+
+ if (map->n_sockets == 0)
+ return -1;
+
+ /* Check that each socket has exactly the same number of cores
+ and that each core has exactly the same number of hyper-threads */
+ for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
+ for (core_id = 0; core_id < map->n_cores_per_socket; core_id++)
+ for (ht_id = 0;
+ ht_id < map->n_max_ht_per_core;
+ ht_id++) {
+ uint32_t pos = (socket_id *
+ map->n_max_cores_per_socket + core_id) *
+ map->n_max_ht_per_core + ht_id;
+
+ if (((ht_id < map->n_ht_per_core) &&
+ (map->map[pos] == -1)) ||
+ ((ht_id >= map->n_ht_per_core) &&
+ (map->map[pos] != -1)))
+ return -1;
+ }
+
+ for ( ; core_id < map->n_max_cores_per_socket; core_id++)
+ for (ht_id = 0;
+ ht_id < map->n_max_ht_per_core;
+ ht_id++) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id,
+ ht_id);
+
+ if (map->map[pos] != -1)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#define FILE_LINUX_CPU_N_LCORES \
+ "/sys/devices/system/cpu/present"
+
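+/*
+ * The "present" sysfs file holds a range such as "0-31"; the number of
+ * logical cores is the value after '-' plus one.
+ */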
+static int
+cpu_core_map_get_n_lcores_linux(void)
+{
+ char buffer[64], *string;
+ FILE *fd;
+
+ fd = fopen(FILE_LINUX_CPU_N_LCORES, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ string = index(buffer, '-');
+ if (string == NULL)
+ return -1;
+
+ return atoi(++string) + 1;
+}
+
+#define FILE_LINUX_CPU_CORE_ID \
+ "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_id"
+
+static int
+cpu_core_map_get_core_id_linux(int lcore_id)
+{
+ char buffer[64];
+ FILE *fd;
+ int core_id;
+
+ snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_CORE_ID, lcore_id);
+ fd = fopen(buffer, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ core_id = atoi(buffer);
+ return core_id;
+}
+
+#define FILE_LINUX_CPU_SOCKET_ID \
+ "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/physical_package_id"
+
+static int
+cpu_core_map_get_socket_id_linux(int lcore_id)
+{
+ char buffer[64];
+ FILE *fd;
+ int socket_id;
+
+ snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_SOCKET_ID, lcore_id);
+ fd = fopen(buffer, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ socket_id = atoi(buffer);
+ return socket_id;
+}
+
+int
+cpu_core_map_compute_linux(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+ int n_lcores;
+
+ n_lcores = cpu_core_map_get_n_lcores_linux();
+ if (n_lcores <= 0)
+ return -1;
+
+ /* Compute map */
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t n_detected, core_id_contig;
+ int lcore_id;
+
+ n_detected = 0;
+ for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
+ int lcore_socket_id =
+ cpu_core_map_get_socket_id_linux(lcore_id);
+
+ if (lcore_socket_id < 0)
+ return -1;
+
+ if (((uint32_t) lcore_socket_id) == socket_id)
+ n_detected++;
+ }
+
+ core_id_contig = 0;
+
+ for (core_id = 0; n_detected ; core_id++) {
+ ht_id = 0;
+
+ for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
+ int lcore_socket_id =
+ cpu_core_map_get_socket_id_linux(
+ lcore_id);
+
+ if (lcore_socket_id < 0)
+ return -1;
+
+ int lcore_core_id =
+ cpu_core_map_get_core_id_linux(
+ lcore_id);
+
+ if (lcore_core_id < 0)
+ return -1;
+
+ if (((uint32_t) lcore_socket_id == socket_id) &&
+ ((uint32_t) lcore_core_id == core_id)) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id_contig,
+ ht_id);
+
+ map->map[pos] = lcore_id;
+ ht_id++;
+ n_detected--;
+ }
+ }
+
+ if (ht_id) {
+ core_id_contig++;
+ if (core_id_contig ==
+ map->n_max_cores_per_socket)
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+cpu_core_map_print(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ if (map == NULL)
+ return;
+
+ for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
+ printf("Socket %" PRIu32 ":\n", socket_id);
+
+ for (core_id = 0;
+ core_id < map->n_cores_per_socket;
+ core_id++) {
+ printf("[%" PRIu32 "] = [", core_id);
+
+ for (ht_id = 0; ht_id < map->n_ht_per_core; ht_id++) {
+ int lcore_id = cpu_core_map_get_lcore_id(map,
+ socket_id,
+ core_id,
+ ht_id);
+
+ uint32_t core_id_noncontig =
+ cpu_core_map_get_core_id_linux(
+ lcore_id);
+
+ printf(" %" PRId32 " (%" PRIu32 ") ",
+ lcore_id,
+ core_id_noncontig);
+ }
+
+ printf("]\n");
+ }
+ }
+}
+
+uint32_t
+cpu_core_map_get_n_sockets(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_sockets;
+}
+
+uint32_t
+cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_cores_per_socket;
+}
+
+uint32_t
+cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_ht_per_core;
+}
+
+int
+cpu_core_map_get_lcore_id(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id)
+{
+ uint32_t pos;
+
+ if ((map == NULL) ||
+ (socket_id >= map->n_sockets) ||
+ (core_id >= map->n_cores_per_socket) ||
+ (ht_id >= map->n_ht_per_core))
+ return -1;
+
+ pos = cpu_core_map_pos(map, socket_id, core_id, ht_id);
+
+ return map->map[pos];
+}
+
+void
+cpu_core_map_free(struct cpu_core_map *map)
+{
+ free(map);
+}
diff --git a/examples/ip_pipeline/cpu_core_map.h b/examples/ip_pipeline/cpu_core_map.h
new file mode 100644
index 00000000..5c2ec729
--- /dev/null
+++ b/examples/ip_pipeline/cpu_core_map.h
@@ -0,0 +1,69 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_CPU_CORE_MAP_H__
+#define __INCLUDE_CPU_CORE_MAP_H__
+
+#include <stdio.h>
+
+#include <rte_lcore.h>
+
+struct cpu_core_map;
+
+struct cpu_core_map *
+cpu_core_map_init(uint32_t n_max_sockets,
+ uint32_t n_max_cores_per_socket,
+ uint32_t n_max_ht_per_core,
+ uint32_t eal_initialized);
+
+uint32_t
+cpu_core_map_get_n_sockets(struct cpu_core_map *map);
+
+uint32_t
+cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map);
+
+uint32_t
+cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map);
+
+int
+cpu_core_map_get_lcore_id(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id);
+
+void cpu_core_map_print(struct cpu_core_map *map);
+
+void
+cpu_core_map_free(struct cpu_core_map *map);
+
+#endif
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
new file mode 100644
index 00000000..83422e88
--- /dev/null
+++ b/examples/ip_pipeline/init.c
@@ -0,0 +1,1637 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+
+#include "app.h"
+#include "pipeline.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_master.h"
+#include "pipeline_passthrough.h"
+#include "pipeline_firewall.h"
+#include "pipeline_flow_classification.h"
+#include "pipeline_flow_actions.h"
+#include "pipeline_routing.h"
+#include "thread_fe.h"
+
+#define APP_NAME_SIZE 32
+
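+/*
+ * Build the CPU core map (socket, physical core, hyper-thread) used to
+ * translate the per-pipeline placement from the config file into EAL
+ * lcore ids. The map is sized for up to 4 sockets x 32 cores x 4
+ * hyper-threads and is built before EAL is initialized.
+ */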
+static void
+app_init_core_map(struct app_params *app)
+{
+ APP_LOG(app, HIGH, "Initializing CPU core map ...");
+ app->core_map = cpu_core_map_init(4, 32, 4, 0);
+
+ if (app->core_map == NULL)
+ rte_panic("Cannot create CPU core map\n");
+
+ if (app->log_level >= APP_LOG_LEVEL_LOW)
+ cpu_core_map_print(app->core_map);
+}
+
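+/*
+ * Compute the EAL core mask: resolve each pipeline's (socket, core,
+ * hyper-thread) placement through the core map and set the corresponding
+ * lcore bit.
+ */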
+static void
+app_init_core_mask(struct app_params *app)
+{
+ uint64_t mask = 0;
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ p->socket_id,
+ p->core_id,
+ p->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("Cannot create CPU core mask\n");
+
+ mask |= 1LLU << lcore_id;
+ }
+
+ app->core_mask = mask;
+ APP_LOG(app, HIGH, "CPU core mask = 0x%016" PRIx64, app->core_mask);
+}
+
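+/*
+ * Convert the [EAL] section of the configuration into an argv-style
+ * argument list (core mask, memory channels, PCI white/black lists,
+ * vdevs, etc.) and call rte_eal_init(). When the application port mask
+ * is not set, the PCI device of every configured LINK is white-listed.
+ */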
+static void
+app_init_eal(struct app_params *app)
+{
+ char buffer[256];
+ struct app_eal_params *p = &app->eal_params;
+ uint32_t n_args = 0;
+ uint32_t i;
+ int status;
+
+ app->eal_argv[n_args++] = strdup(app->app_name);
+
+ snprintf(buffer, sizeof(buffer), "-c%" PRIx64, app->core_mask);
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ if (p->coremap) {
+ snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->master_lcore_present) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--master-lcore=%" PRIu32,
+ p->master_lcore);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ if (p->memory_present) {
+ snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->ranks_present) {
+ snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-blacklist=%s",
+ p->pci_blacklist[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (app->port_mask != 0)
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-whitelist=%s",
+ p->pci_whitelist[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+ else
+ for (i = 0; i < app->n_links; i++) {
+ char *pci_bdf = app->link_params[i].pci_bdf;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-whitelist=%s",
+ pci_bdf);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--vdev=%s",
+ p->vdev[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
+ snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->proc_type) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--proc-type=%s",
+ p->proc_type);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->syslog) {
+ snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->log_level_present) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--log-level=%" PRIu32,
+ p->log_level);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->version_present) && p->version) {
+ snprintf(buffer, sizeof(buffer), "-v");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->help_present) && p->help) {
+ snprintf(buffer, sizeof(buffer), "--help");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_huge_present) && p->no_huge) {
+ snprintf(buffer, sizeof(buffer), "--no-huge");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_pci_present) && p->no_pci) {
+ snprintf(buffer, sizeof(buffer), "--no-pci");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_hpet_present) && p->no_hpet) {
+ snprintf(buffer, sizeof(buffer), "--no-hpet");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_shconf_present) && p->no_shconf) {
+ snprintf(buffer, sizeof(buffer), "--no-shconf");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->add_driver) {
+ snprintf(buffer, sizeof(buffer), "-d=%s", p->add_driver);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->socket_mem) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--socket-mem=%s",
+ p->socket_mem);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->huge_dir) {
+ snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->file_prefix) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--file-prefix=%s",
+ p->file_prefix);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->base_virtaddr) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--base-virtaddr=%s",
+ p->base_virtaddr);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->create_uio_dev_present) && p->create_uio_dev) {
+ snprintf(buffer, sizeof(buffer), "--create-uio-dev");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->vfio_intr) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--vfio-intr=%s",
+ p->vfio_intr);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->xen_dom0_present) && (p->xen_dom0)) {
+ snprintf(buffer, sizeof(buffer), "--xen-dom0");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ snprintf(buffer, sizeof(buffer), "--");
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ app->eal_argc = n_args;
+
+ APP_LOG(app, HIGH, "Initializing EAL ...");
+ if (app->log_level >= APP_LOG_LEVEL_LOW) {
+ int i;
+
+ fprintf(stdout, "[APP] EAL arguments: \"");
+ for (i = 1; i < app->eal_argc; i++)
+ fprintf(stdout, "%s ", app->eal_argv[i]);
+ fprintf(stdout, "\"\n");
+ }
+
+ status = rte_eal_init(app->eal_argc, app->eal_argv);
+ if (status < 0)
+ rte_panic("EAL init error\n");
+}
+
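+/*
+ * Create one packet mbuf mempool per MEMPOOL section, using the standard
+ * rte_pktmbuf_pool_init()/rte_pktmbuf_init() callbacks.
+ */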
+static void
+app_init_mempool(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_mempools; i++) {
+ struct app_mempool_params *p = &app->mempool_params[i];
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p->name);
+ app->mempool[i] = rte_mempool_create(
+ p->name,
+ p->pool_size,
+ p->buffer_size,
+ p->cache_size,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
+ p->cpu_socket_id,
+ 0);
+
+ if (app->mempool[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
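+/*
+ * The helpers below program NIC ethertype, SYN and 5-tuple (ntuple)
+ * filters through rte_eth_dev_filter_ctrl(), steering ARP, TCP SYN and
+ * local IP/TCP/UDP/SCTP traffic to the RX queues configured per link
+ * (arp_q, tcp_syn_q, ip/tcp/udp/sctp_local_q).
+ */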
+static inline int
+app_link_filter_arp_add(struct app_link_params *link)
+{
+ struct rte_eth_ethertype_filter filter = {
+ .ether_type = ETHER_TYPE_ARP,
+ .flags = 0,
+ .queue = link->arp_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(link->pmd_id,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_syn_add(struct app_link_params *link)
+{
+ struct rte_eth_syn_filter filter = {
+ .hig_pri = 1,
+ .queue = link->tcp_syn_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(link->pmd_id,
+ RTE_ETH_FILTER_SYN,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = 0,
+ .proto_mask = 0, /* Disable */
+ .tcp_flags = 0,
+ .priority = 1, /* Lowest */
+ .queue = l1->ip_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = 0,
+ .proto_mask = 0, /* Disable */
+ .tcp_flags = 0,
+ .priority = 1, /* Lowest */
+ .queue = l1->ip_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_TCP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->tcp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_TCP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->tcp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_UDP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->udp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_UDP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->udp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_SCTP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->sctp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_SCTP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->sctp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static void
+app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
+{
+ if (cp->arp_q != 0) {
+ int status = app_link_filter_arp_add(cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding ARP filter (queue = %" PRIu32 ")",
+ cp->name, cp->pmd_id, cp->arp_q);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding ARP filter "
+ "(queue = %" PRIu32 ") (%" PRId32 ")\n",
+ cp->name, cp->pmd_id, cp->arp_q, status);
+ }
+}
+
+static void
+app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
+{
+ if (cp->tcp_syn_q != 0) {
+ int status = app_link_filter_tcp_syn_add(cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding TCP SYN filter (queue = %" PRIu32 ")",
+ cp->name, cp->pmd_id, cp->tcp_syn_q);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding TCP SYN filter "
+ "(queue = %" PRIu32 ") (%" PRId32 ")\n",
+ cp->name, cp->pmd_id, cp->tcp_syn_q,
+ status);
+ }
+}
+
+static int
+app_link_is_virtual(struct app_link_params *p)
+{
+ uint32_t pmd_id = p->pmd_id;
+ struct rte_eth_dev *dev = &rte_eth_devices[pmd_id];
+
+ if (dev->dev_type == RTE_ETH_DEV_VIRTUAL)
+ return 1;
+
+ return 0;
+}
+
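+/*
+ * Bring a link up: on every link with local queues configured, install
+ * the IP/TCP/UDP/SCTP filters matching this link's IP address, then set
+ * the PMD link up and mark the link state as UP. Virtual devices skip
+ * the filter and PMD steps.
+ */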
+void
+app_link_up_internal(struct app_params *app, struct app_link_params *cp)
+{
+ uint32_t i;
+ int status;
+
+ if (app_link_is_virtual(cp)) {
+ cp->state = 1;
+ return;
+ }
+
+ /* For each link, add filters for IP of current link */
+ if (cp->ip != 0) {
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+
+ /* IP */
+ if (p->ip_local_q != 0) {
+ int status = app_link_filter_ip_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding IP filter (queue= %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->ip_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding IP "
+ "filter (queue= %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->ip_local_q, cp->ip, status);
+ }
+
+ /* TCP */
+ if (p->tcp_local_q != 0) {
+ int status = app_link_filter_tcp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->tcp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding TCP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->tcp_local_q, cp->ip, status);
+ }
+
+ /* UDP */
+ if (p->udp_local_q != 0) {
+ int status = app_link_filter_udp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding UDP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->udp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding UDP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->udp_local_q, cp->ip, status);
+ }
+
+ /* SCTP */
+ if (p->sctp_local_q != 0) {
+ int status = app_link_filter_sctp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Adding SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->sctp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding SCTP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->sctp_local_q, cp->ip,
+ status);
+ }
+ }
+ }
+
+ /* PMD link up */
+ status = rte_eth_dev_set_link_up(cp->pmd_id);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): PMD set link up error %"
+ PRId32 "\n", cp->name, cp->pmd_id, status);
+
+ /* Mark link as UP */
+ cp->state = 1;
+}
+
+void
+app_link_down_internal(struct app_params *app, struct app_link_params *cp)
+{
+ uint32_t i;
+ int status;
+
+ if (app_link_is_virtual(cp)) {
+ cp->state = 0;
+ return;
+ }
+
+ /* PMD link down */
+ status = rte_eth_dev_set_link_down(cp->pmd_id);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): PMD set link down error %"
+ PRId32 "\n", cp->name, cp->pmd_id, status);
+
+ /* Mark link as DOWN */
+ cp->state = 0;
+
+ /* Return if current link IP is not valid */
+ if (cp->ip == 0)
+ return;
+
+ /* For each link, remove filters for IP of current link */
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+
+ /* IP */
+ if (p->ip_local_q != 0) {
+ int status = app_link_filter_ip_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting IP filter "
+ "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->ip_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting IP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->ip_local_q,
+ cp->ip, status);
+ }
+
+ /* TCP */
+ if (p->tcp_local_q != 0) {
+ int status = app_link_filter_tcp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->tcp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->tcp_local_q,
+ cp->ip, status);
+ }
+
+ /* UDP */
+ if (p->udp_local_q != 0) {
+ int status = app_link_filter_udp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting UDP filter "
+ "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->udp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting UDP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->udp_local_q,
+ cp->ip, status);
+ }
+
+ /* SCTP */
+ if (p->sctp_local_q != 0) {
+ int status = app_link_filter_sctp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->sctp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->sctp_local_q,
+ cp->ip, status);
+ }
+ }
+}
+
+static void
+app_check_link(struct app_params *app)
+{
+ uint32_t all_links_up, i;
+
+ all_links_up = 1;
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+ struct rte_eth_link link_params;
+
+ memset(&link_params, 0, sizeof(link_params));
+ rte_eth_link_get(p->pmd_id, &link_params);
+
+ APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
+ p->name,
+ p->pmd_id,
+ link_params.link_speed / 1000,
+ link_params.link_status ? "UP" : "DOWN");
+
+ if (link_params.link_status == ETH_LINK_DOWN)
+ all_links_up = 0;
+ }
+
+ if (all_links_up == 0)
+ rte_panic("Some links are DOWN\n");
+}
+
+static uint32_t
+is_any_swq_frag_or_ras(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+
+ if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
+ (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+app_init_link_frag_ras(struct app_params *app)
+{
+ uint32_t i;
+
+ if (is_any_swq_frag_or_ras(app)) {
+ for (i = 0; i < app->n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i];
+
+ p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
+ }
+ }
+}
+
+static inline int
+app_get_cpu_socket_id(uint32_t pmd_id)
+{
+ int status = rte_eth_dev_socket_id(pmd_id);
+
+ return (status != SOCKET_ID_ANY) ? status : 0;
+}
+
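+/*
+ * Configure and start every LINK: set up the RX/TX queues that reference
+ * it (matched by RXQ<link>.<queue> / TXQ<link>.<queue> names), install
+ * the ARP and TCP SYN filters, bring the port up and finally verify that
+ * all links report UP.
+ */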
+static void
+app_init_link(struct app_params *app)
+{
+ uint32_t i;
+
+ app_init_link_frag_ras(app);
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p_link = &app->link_params[i];
+ uint32_t link_id, n_hwq_in, n_hwq_out, j;
+ int status;
+
+ sscanf(p_link->name, "LINK%" PRIu32, &link_id);
+ n_hwq_in = app_link_get_n_rxq(app, p_link);
+ n_hwq_out = app_link_get_n_txq(app, p_link);
+
+ APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
+ "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
+ p_link->name,
+ p_link->pmd_id,
+ n_hwq_in,
+ n_hwq_out);
+
+ /* LINK */
+ status = rte_eth_dev_configure(
+ p_link->pmd_id,
+ n_hwq_in,
+ n_hwq_out,
+ &p_link->conf);
+ if (status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_link->name, p_link->pmd_id, status);
+
+ rte_eth_macaddr_get(p_link->pmd_id,
+ (struct ether_addr *) &p_link->mac_addr);
+
+ if (p_link->promisc)
+ rte_eth_promiscuous_enable(p_link->pmd_id);
+
+ /* RXQ */
+ for (j = 0; j < app->n_pktq_hwq_in; j++) {
+ struct app_pktq_hwq_in_params *p_rxq =
+ &app->hwq_in_params[j];
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
+ &rxq_link_id, &rxq_queue_id);
+ if (rxq_link_id != link_id)
+ continue;
+
+ status = rte_eth_rx_queue_setup(
+ p_link->pmd_id,
+ rxq_queue_id,
+ p_rxq->size,
+ app_get_cpu_socket_id(p_link->pmd_id),
+ &p_rxq->conf,
+ app->mempool[p_rxq->mempool_id]);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s init error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_rxq->name,
+ status);
+ }
+
+ /* TXQ */
+ for (j = 0; j < app->n_pktq_hwq_out; j++) {
+ struct app_pktq_hwq_out_params *p_txq =
+ &app->hwq_out_params[j];
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
+ &txq_link_id, &txq_queue_id);
+ if (txq_link_id != link_id)
+ continue;
+
+ status = rte_eth_tx_queue_setup(
+ p_link->pmd_id,
+ txq_queue_id,
+ p_txq->size,
+ app_get_cpu_socket_id(p_link->pmd_id),
+ &p_txq->conf);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s init error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_txq->name,
+ status);
+ }
+
+ /* LINK START */
+ status = rte_eth_dev_start(p_link->pmd_id);
+ if (status < 0)
+ rte_panic("Cannot start %s (error %" PRId32 ")\n",
+ p_link->name, status);
+
+ /* LINK UP */
+ app_link_set_arp_filter(app, p_link);
+ app_link_set_tcp_syn_filter(app, p_link);
+ app_link_up_internal(app, p_link);
+ }
+
+ app_check_link(app);
+}
+
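+/*
+ * Create one rte_ring per SWQ. Single-producer/single-consumer flags are
+ * set when the queue has exactly one writer/reader pipeline.
+ */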
+static void
+app_init_swq(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+ unsigned flags = 0;
+
+ if (app_swq_get_readers(app, p) == 1)
+ flags |= RING_F_SC_DEQ;
+ if (app_swq_get_writers(app, p) == 1)
+ flags |= RING_F_SP_ENQ;
+
+ APP_LOG(app, HIGH, "Initializing %s...", p->name);
+ app->swq[i] = rte_ring_create(
+ p->name,
+ p->size,
+ p->cpu_socket_id,
+ flags);
+
+ if (app->swq[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
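+/*
+ * Create one rte_sched port per TM section, with its rate derived from
+ * the line rate of the associated link, then configure its subports and
+ * all pipes that have a profile assigned.
+ */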
+static void
+app_init_tm(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tm; i++) {
+ struct app_pktq_tm_params *p_tm = &app->tm_params[i];
+ struct app_link_params *p_link;
+ struct rte_eth_link link_eth_params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+ int status;
+
+ p_link = app_get_link_for_tm(app, p_tm);
+ /* LINK */
+ rte_eth_link_get(p_link->pmd_id, &link_eth_params);
+
+ /* TM */
+ p_tm->sched_port_params.name = p_tm->name;
+ p_tm->sched_port_params.socket =
+ app_get_cpu_socket_id(p_link->pmd_id);
+ p_tm->sched_port_params.rate =
+ (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
+ sched = rte_sched_port_config(&p_tm->sched_port_params);
+ if (sched == NULL)
+ rte_panic("%s init error\n", p_tm->name);
+ app->tm[i] = sched;
+
+ /* Subport */
+ n_subports = p_tm->sched_port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport, pipe_id;
+
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &p_tm->sched_subport_params[subport_id]);
+ if (status)
+ rte_panic("%s subport %" PRIu32
+ " init error (%" PRId32 ")\n",
+ p_tm->name, subport_id, status);
+
+ /* Pipe */
+ n_pipes_per_subport =
+ p_tm->sched_port_params.n_pipes_per_subport;
+ for (pipe_id = 0;
+ pipe_id < n_pipes_per_subport;
+ pipe_id++) {
+ int profile_id = p_tm->sched_pipe_to_profile[
+ subport_id * APP_MAX_SCHED_PIPES +
+ pipe_id];
+
+ if (profile_id == -1)
+ continue;
+
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status)
+ rte_panic("%s subport %" PRIu32
+ " pipe %" PRIu32
+ " (profile %" PRId32 ") "
+ "init error (% " PRId32 ")\n",
+ p_tm->name, subport_id, pipe_id,
+ profile_id, status);
+ }
+ }
+ }
+}
+
+static void
+app_init_msgq(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_msgq; i++) {
+ struct app_msgq_params *p = &app->msgq_params[i];
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p->name);
+ app->msgq[i] = rte_ring_create(
+ p->name,
+ p->size,
+ p->cpu_socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (app->msgq[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
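+/*
+ * Translate the front-end pipeline description into the back-end
+ * pipeline_params: each pktq_in/pktq_out entry is mapped to the matching
+ * librte_port reader/writer (ethdev, ring, sched, source/sink, including
+ * the frag/ras and drop/no-drop variants), and the message queues and
+ * arguments are copied over.
+ */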
+static void app_pipeline_params_get(struct app_params *app,
+ struct app_pipeline_params *p_in,
+ struct pipeline_params *p_out)
+{
+ uint32_t i;
+ uint32_t mempool_id;
+
+ snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
+
+ p_out->socket_id = (int) p_in->socket_id;
+
+ p_out->log_level = app->log_level;
+
+ /* pktq_in */
+ p_out->n_ports_in = p_in->n_pktq_in;
+ for (i = 0; i < p_in->n_pktq_in; i++) {
+ struct app_pktq_in_params *in = &p_in->pktq_in[i];
+ struct pipeline_port_in_params *out = &p_out->port_in[i];
+
+ switch (in->type) {
+ case APP_PKTQ_IN_HWQ:
+ {
+ struct app_pktq_hwq_in_params *p_hwq_in =
+ &app->hwq_in_params[in->id];
+ struct app_link_params *p_link =
+ app_get_link_for_rxq(app, p_hwq_in);
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id,
+ &rxq_queue_id);
+
+ out->type = PIPELINE_PORT_IN_ETHDEV_READER;
+ out->params.ethdev.port_id = p_link->pmd_id;
+ out->params.ethdev.queue_id = rxq_queue_id;
+ out->burst_size = p_hwq_in->burst;
+ break;
+ }
+ case APP_PKTQ_IN_SWQ:
+ {
+ struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
+
+ if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
+ if (app_swq_get_readers(app, swq_params) == 1) {
+ out->type = PIPELINE_PORT_IN_RING_READER;
+ out->params.ring.ring = app->swq[in->id];
+ out->burst_size = app->swq_params[in->id].burst_read;
+ } else {
+ out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
+ out->params.ring_multi.ring = app->swq[in->id];
+ out->burst_size = swq_params->burst_read;
+ }
+ } else {
+ if (swq_params->ipv4_frag == 1) {
+ struct rte_port_ring_reader_ipv4_frag_params *params =
+ &out->params.ring_ipv4_frag;
+
+ out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
+ params->ring = app->swq[in->id];
+ params->mtu = swq_params->mtu;
+ params->metadata_size = swq_params->metadata_size;
+ params->pool_direct =
+ app->mempool[swq_params->mempool_direct_id];
+ params->pool_indirect =
+ app->mempool[swq_params->mempool_indirect_id];
+ out->burst_size = swq_params->burst_read;
+ } else {
+ struct rte_port_ring_reader_ipv6_frag_params *params =
+ &out->params.ring_ipv6_frag;
+
+ out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
+ params->ring = app->swq[in->id];
+ params->mtu = swq_params->mtu;
+ params->metadata_size = swq_params->metadata_size;
+ params->pool_direct =
+ app->mempool[swq_params->mempool_direct_id];
+ params->pool_indirect =
+ app->mempool[swq_params->mempool_indirect_id];
+ out->burst_size = swq_params->burst_read;
+ }
+ }
+ break;
+ }
+ case APP_PKTQ_IN_TM:
+ out->type = PIPELINE_PORT_IN_SCHED_READER;
+ out->params.sched.sched = app->tm[in->id];
+ out->burst_size = app->tm_params[in->id].burst_read;
+ break;
+ case APP_PKTQ_IN_SOURCE:
+ mempool_id = app->source_params[in->id].mempool_id;
+ out->type = PIPELINE_PORT_IN_SOURCE;
+ out->params.source.mempool = app->mempool[mempool_id];
+ out->burst_size = app->source_params[in->id].burst;
+
+#ifdef RTE_NEXT_ABI
+ if (app->source_params[in->id].file_name
+ != NULL) {
+ out->params.source.file_name = strdup(
+ app->source_params[in->id].
+ file_name);
+ if (out->params.source.file_name == NULL) {
+ out->params.source.
+ n_bytes_per_pkt = 0;
+ break;
+ }
+ out->params.source.n_bytes_per_pkt =
+ app->source_params[in->id].
+ n_bytes_per_pkt;
+ }
+#endif
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* pktq_out */
+ p_out->n_ports_out = p_in->n_pktq_out;
+ for (i = 0; i < p_in->n_pktq_out; i++) {
+ struct app_pktq_out_params *in = &p_in->pktq_out[i];
+ struct pipeline_port_out_params *out = &p_out->port_out[i];
+
+ switch (in->type) {
+ case APP_PKTQ_OUT_HWQ:
+ {
+ struct app_pktq_hwq_out_params *p_hwq_out =
+ &app->hwq_out_params[in->id];
+ struct app_link_params *p_link =
+ app_get_link_for_txq(app, p_hwq_out);
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p_hwq_out->name,
+ "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id,
+ &txq_queue_id);
+
+ if (p_hwq_out->dropless == 0) {
+ struct rte_port_ethdev_writer_params *params =
+ &out->params.ethdev;
+
+ out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
+ params->port_id = p_link->pmd_id;
+ params->queue_id = txq_queue_id;
+ params->tx_burst_sz =
+ app->hwq_out_params[in->id].burst;
+ } else {
+ struct rte_port_ethdev_writer_nodrop_params
+ *params = &out->params.ethdev_nodrop;
+
+ out->type =
+ PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
+ params->port_id = p_link->pmd_id;
+ params->queue_id = txq_queue_id;
+ params->tx_burst_sz = p_hwq_out->burst;
+ params->n_retries = p_hwq_out->n_retries;
+ }
+ break;
+ }
+ case APP_PKTQ_OUT_SWQ:
+ {
+ struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
+
+ if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
+ if (app_swq_get_writers(app, swq_params) == 1) {
+ if (app->swq_params[in->id].dropless == 0) {
+ struct rte_port_ring_writer_params *params =
+ &out->params.ring;
+
+ out->type = PIPELINE_PORT_OUT_RING_WRITER;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz =
+ app->swq_params[in->id].burst_write;
+ } else {
+ struct rte_port_ring_writer_nodrop_params
+ *params = &out->params.ring_nodrop;
+
+ out->type =
+ PIPELINE_PORT_OUT_RING_WRITER_NODROP;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz =
+ app->swq_params[in->id].burst_write;
+ params->n_retries =
+ app->swq_params[in->id].n_retries;
+ }
+ } else {
+ if (swq_params->dropless == 0) {
+ struct rte_port_ring_multi_writer_params *params =
+ &out->params.ring_multi;
+
+ out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ } else {
+ struct rte_port_ring_multi_writer_nodrop_params
+ *params = &out->params.ring_multi_nodrop;
+
+ out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ params->n_retries = swq_params->n_retries;
+ }
+ }
+ } else {
+ if (swq_params->ipv4_ras == 1) {
+ struct rte_port_ring_writer_ipv4_ras_params *params =
+ &out->params.ring_ipv4_ras;
+
+ out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ } else {
+ struct rte_port_ring_writer_ipv6_ras_params *params =
+ &out->params.ring_ipv6_ras;
+
+ out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ }
+ }
+ break;
+ }
+ case APP_PKTQ_OUT_TM: {
+ struct rte_port_sched_writer_params *params =
+ &out->params.sched;
+
+ out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
+ params->sched = app->tm[in->id];
+ params->tx_burst_sz =
+ app->tm_params[in->id].burst_write;
+ break;
+ }
+ case APP_PKTQ_OUT_SINK:
+ out->type = PIPELINE_PORT_OUT_SINK;
+ if (app->sink_params[in->id].file_name != NULL) {
+ out->params.sink.file_name = strdup(
+ app->sink_params[in->id].
+ file_name);
+ if (out->params.sink.file_name == NULL) {
+ out->params.sink.max_n_pkts = 0;
+ break;
+ }
+ out->params.sink.max_n_pkts =
+ app->sink_params[in->id].
+ n_pkts_to_dump;
+ } else {
+ out->params.sink.file_name = NULL;
+ out->params.sink.max_n_pkts = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* msgq */
+ p_out->n_msgq = p_in->n_msgq_in;
+
+ for (i = 0; i < p_in->n_msgq_in; i++)
+ p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
+
+ for (i = 0; i < p_in->n_msgq_out; i++)
+ p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
+
+ /* args */
+ p_out->n_args = p_in->n_args;
+ for (i = 0; i < p_in->n_args; i++) {
+ p_out->args_name[i] = p_in->args_name[i];
+ p_out->args_value[i] = p_in->args_value[i];
+ }
+}
+
+static void
+app_init_pipelines(struct app_params *app)
+{
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ struct pipeline_type *ptype;
+ struct pipeline_params pp;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", params->name);
+
+ ptype = app_pipeline_type_find(app, params->type);
+ if (ptype == NULL)
+ rte_panic("Init error: Unknown pipeline type \"%s\"\n",
+ params->type);
+
+ app_pipeline_params_get(app, params, &pp);
+
+ /* Back-end */
+ data->be = NULL;
+ if (ptype->be_ops->f_init) {
+ data->be = ptype->be_ops->f_init(&pp, (void *) app);
+
+ if (data->be == NULL)
+ rte_panic("Pipeline instance \"%s\" back-end "
+ "init error\n", params->name);
+ }
+
+ /* Front-end */
+ data->fe = NULL;
+ if (ptype->fe_ops->f_init) {
+ data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
+
+ if (data->fe == NULL)
+ rte_panic("Pipeline instance \"%s\" front-end "
+ "init error\n", params->name);
+ }
+
+ data->ptype = ptype;
+
+ data->timer_period = (rte_get_tsc_hz() *
+ params->timer_period) / 100;
+ }
+}
+
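+/*
+ * Map each pipeline to the thread (lcore) given by its placement, hook up
+ * the thread message queues and set the timer deadlines. Pipelines with a
+ * custom f_run handler go to the thread's custom list, the others to the
+ * regular list.
+ */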
+static void
+app_init_threads(struct app_params *app)
+{
+ uint64_t time = rte_get_tsc_cycles();
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ struct pipeline_type *ptype;
+ struct app_thread_data *t;
+ struct app_thread_pipeline_data *p;
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
+ params->socket_id,
+ params->core_id,
+ (params->hyper_th_id) ? "h" : "");
+
+ t = &app->thread_data[lcore_id];
+
+ t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
+ t->thread_req_deadline = time + t->timer_period;
+
+ t->headroom_cycles = 0;
+ t->headroom_time = rte_get_tsc_cycles();
+ t->headroom_ratio = 0.0;
+
+ t->msgq_in = app_thread_msgq_in_get(app,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+ if (t->msgq_in == NULL)
+ rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
+ lcore_id);
+
+ t->msgq_out = app_thread_msgq_out_get(app,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+ if (t->msgq_out == NULL)
+ rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
+ lcore_id);
+
+ ptype = app_pipeline_type_find(app, params->type);
+ if (ptype == NULL)
+ rte_panic("Init error: Unknown pipeline "
+ "type \"%s\"\n", params->type);
+
+ p = (ptype->be_ops->f_run == NULL) ?
+ &t->regular[t->n_regular] :
+ &t->custom[t->n_custom];
+
+ p->pipeline_id = p_id;
+ p->be = data->be;
+ p->f_run = ptype->be_ops->f_run;
+ p->f_timer = ptype->be_ops->f_timer;
+ p->timer_period = data->timer_period;
+ p->deadline = time + data->timer_period;
+
+ data->enabled = 1;
+
+ if (ptype->be_ops->f_run == NULL)
+ t->n_regular++;
+ else
+ t->n_custom++;
+ }
+}
+
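+/*
+ * Top-level init: build the core map and mask, start EAL, create all
+ * mempools, links, SWQs, TM ports and MSGQs, register the built-in
+ * pipeline types, then instantiate the pipelines and assign them to
+ * threads.
+ */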
+int app_init(struct app_params *app)
+{
+ app_init_core_map(app);
+ app_init_core_mask(app);
+
+ app_init_eal(app);
+ app_init_mempool(app);
+ app_init_link(app);
+ app_init_swq(app);
+ app_init_tm(app);
+ app_init_msgq(app);
+
+ app_pipeline_common_cmd_push(app);
+ app_pipeline_thread_cmd_push(app);
+ app_pipeline_type_register(app, &pipeline_master);
+ app_pipeline_type_register(app, &pipeline_passthrough);
+ app_pipeline_type_register(app, &pipeline_flow_classification);
+ app_pipeline_type_register(app, &pipeline_flow_actions);
+ app_pipeline_type_register(app, &pipeline_firewall);
+ app_pipeline_type_register(app, &pipeline_routing);
+
+ app_init_pipelines(app);
+ app_init_threads(app);
+
+ return 0;
+}
+
+static int
+app_pipeline_type_cmd_push(struct app_params *app,
+ struct pipeline_type *ptype)
+{
+ cmdline_parse_ctx_t *cmds;
+ uint32_t n_cmds, i;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (ptype == NULL))
+ return -EINVAL;
+
+ n_cmds = pipeline_type_cmds_count(ptype);
+ if (n_cmds == 0)
+ return 0;
+
+ cmds = ptype->fe_ops->cmds;
+
+ /* Check for available slots in the application commands array */
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push pipeline commands into the application */
+ memcpy(&app->cmds[app->n_cmds],
+ cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
+
+int
+app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
+{
+ uint32_t n_cmds, i;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (ptype == NULL) ||
+ (ptype->name == NULL) ||
+ (strlen(ptype->name) == 0) ||
+ (ptype->be_ops->f_init == NULL) ||
+ (ptype->be_ops->f_timer == NULL))
+ return -EINVAL;
+
+ /* Check for duplicate entry */
+ for (i = 0; i < app->n_pipeline_types; i++)
+ if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
+ return -EEXIST;
+
+ /* Check for resource availability */
+ n_cmds = pipeline_type_cmds_count(ptype);
+ if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
+ (n_cmds > APP_MAX_CMDS - app->n_cmds))
+ return -ENOMEM;
+
+ /* Copy pipeline type */
+ memcpy(&app->pipeline_type[app->n_pipeline_types++],
+ ptype,
+ sizeof(struct pipeline_type));
+
+ /* Copy CLI commands */
+ if (n_cmds)
+ app_pipeline_type_cmd_push(app, ptype);
+
+ return 0;
+}
+
+struct pipeline_type *
+app_pipeline_type_find(struct app_params *app, char *name)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipeline_types; i++)
+ if (strcmp(app->pipeline_type[i].name, name) == 0)
+ return &app->pipeline_type[i];
+
+ return NULL;
+}
diff --git a/examples/ip_pipeline/main.c b/examples/ip_pipeline/main.c
new file mode 100644
index 00000000..4944dcfb
--- /dev/null
+++ b/examples/ip_pipeline/main.c
@@ -0,0 +1,64 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "app.h"
+
+static struct app_params app;
+
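+/*
+ * Entry point: parse and check the configuration, initialize the
+ * application and launch app_thread() on all lcores, including the
+ * master lcore.
+ */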
+int
+main(int argc, char **argv)
+{
+ rte_openlog_stream(stderr);
+
+ /* Config */
+ app_config_init(&app);
+
+ app_config_args(&app, argc, argv);
+
+ app_config_preproc(&app);
+
+ app_config_parse(&app, app.parser_file);
+
+ app_config_check(&app);
+
+ /* Init */
+ app_init(&app);
+
+ /* Run-time */
+ rte_eal_mp_remote_launch(
+ app_thread,
+ (void *) &app,
+ CALL_MASTER);
+
+ return 0;
+}
diff --git a/examples/ip_pipeline/parser.h b/examples/ip_pipeline/parser.h
new file mode 100644
index 00000000..58b59daf
--- /dev/null
+++ b/examples/ip_pipeline/parser.h
@@ -0,0 +1,50 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PARSER_H__
+#define __INCLUDE_PARSER_H__
+
+int
+parser_read_arg_bool(const char *p);
+
+int
+parser_read_uint64(uint64_t *value, const char *p);
+
+int
+parser_read_uint32(uint32_t *value, const char *p);
+
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+#endif
+
diff --git a/examples/ip_pipeline/pipeline.h b/examples/ip_pipeline/pipeline.h
new file mode 100644
index 00000000..dab9c36d
--- /dev/null
+++ b/examples/ip_pipeline/pipeline.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_H__
+#define __INCLUDE_PIPELINE_H__
+
+#include <cmdline_parse.h>
+
+#include "pipeline_be.h"
+
+/*
+ * Pipeline type front-end operations
+ */
+
+typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params, void *arg);
+
+typedef int (*pipeline_fe_op_free)(void *pipeline);
+
+struct pipeline_fe_ops {
+ pipeline_fe_op_init f_init;
+ pipeline_fe_op_free f_free;
+ cmdline_parse_ctx_t *cmds;
+};
+
+/*
+ * Pipeline type
+ */
+
+struct pipeline_type {
+ const char *name;
+
+ /* pipeline back-end */
+ struct pipeline_be_ops *be_ops;
+
+ /* pipeline front-end */
+ struct pipeline_fe_ops *fe_ops;
+};
+
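+/* Count the entries of a pipeline type's NULL-terminated CLI command array. */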
+static inline uint32_t
+pipeline_type_cmds_count(struct pipeline_type *ptype)
+{
+ cmdline_parse_ctx_t *cmds;
+ uint32_t n_cmds;
+
+ if (ptype->fe_ops == NULL)
+ return 0;
+
+ cmds = ptype->fe_ops->cmds;
+ if (cmds == NULL)
+ return 0;
+
+ for (n_cmds = 0; cmds[n_cmds]; n_cmds++);
+
+ return n_cmds;
+}
+
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry);
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/hash_func.h b/examples/ip_pipeline/pipeline/hash_func.h
new file mode 100644
index 00000000..9db7173f
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/hash_func.h
@@ -0,0 +1,351 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __INCLUDE_HASH_FUNC_H__
+#define __INCLUDE_HASH_FUNC_H__
+
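+/*
+ * Portable fallback hash functions: XOR-fold the key (8 to 64 bytes,
+ * processed as 8-byte words) together with the seed, then fold the upper
+ * and lower 32 bits of the result.
+ */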
+static inline uint64_t
+hash_xor_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = seed ^ k[0];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+
+ xor0 ^= k[2];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+
+ xor0 ^= xor1;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+
+ xor0 ^= xor1;
+
+ xor0 ^= k[4];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+
+ xor0 ^= xor1;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+
+ xor0 ^= xor1;
+ xor2 ^= k[6];
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2, xor3;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+ xor3 = k[6] ^ k[7];
+
+ xor0 ^= xor1;
+ xor2 ^= xor3;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
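+/*
+ * On x86_64 with SSE4.2, faster variants based on the crc32q instruction
+ * (_mm_crc32_u64) are defined below and selected as the default hash
+ * functions; otherwise the XOR versions above are used.
+ */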
+#if defined(RTE_ARCH_X86_64) && defined(RTE_MACHINE_CPUFLAG_SSE4_2)
+
+#include <x86intrin.h>
+
+static inline uint64_t
+hash_crc_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t crc0;
+
+ crc0 = _mm_crc32_u64(seed, k[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, crc0, crc1;
+
+ k0 = k[0];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc0 = _mm_crc32_u64(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = k2 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, k5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6]);
+ crc5 = k5 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#else
+
+#define hash_default_key8 hash_xor_key8
+#define hash_default_key16 hash_xor_key16
+#define hash_default_key24 hash_xor_key24
+#define hash_default_key32 hash_xor_key32
+#define hash_default_key40 hash_xor_key40
+#define hash_default_key48 hash_xor_key48
+#define hash_default_key56 hash_xor_key56
+#define hash_default_key64 hash_xor_key64
+
+#endif
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_actions_common.h b/examples/ip_pipeline/pipeline/pipeline_actions_common.h
new file mode 100644
index 00000000..ab08612d
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_actions_common.h
@@ -0,0 +1,231 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
+#define __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_pipeline.h>
+
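+/*
+ * Macros generating rte_pipeline action handlers: packets are processed
+ * four at a time via f_pkt4_work with a scalar f_pkt_work tail, for input
+ * ports (optionally hijacking all packets) and for table hit/miss
+ * callbacks (optionally with packet drop and TSC timestamping).
+ */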
+#define PIPELINE_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], arg); \
+ \
+ return 0; \
+}
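/* The macro above generates an input-port action handler that walks the
 * burst four packets at a time (the n_pkts & ~0x3 bound) and finishes the
 * remainder one packet at a time, so a pipeline only supplies the two work
 * functions. A hedged, illustrative instantiation (names below are not part
 * of this patch): */
static inline void
pkt_work_count(struct rte_mbuf *pkt, void *arg)
{
	uint32_t *counter = arg;

	(*counter)++;		/* scalar variant: one packet */
	(void) pkt;
}

static inline void
pkt4_work_count(struct rte_mbuf **pkts, void *arg)
{
	uint32_t *counter = arg;

	*counter += 4;		/* unrolled variant: four packets */
	(void) pkts;
}

PIPELINE_PORT_IN_AH(port_in_ah_count, pkt_work_count, pkt4_work_count)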
+
+#define PIPELINE_PORT_IN_AH_HIJACK_ALL(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint64_t pkt_mask = RTE_LEN2MASK(n_pkts, uint64_t); \
+ uint32_t i; \
+ \
+ rte_pipeline_ah_packet_hijack(p, pkt_mask); \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], arg); \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_HIT(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_in_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], &entries[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], entries[i], arg); \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ f_pkt_work(pkts[pos], entries[pos], arg); \
+ } \
+ \
+ return 0; \
+}
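/* The (pkts_in_mask & (pkts_in_mask + 1)) == 0 test above is true only when
 * the mask is a contiguous run of ones starting at bit 0, i.e. the burst is
 * dense; only then is the 4-at-a-time loop valid, otherwise the handler
 * walks the set bits with __builtin_ctzll. A standalone illustration: */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dense = 0x0F;	/* packets 0..3 present -> dense path  */
	uint64_t sparse = 0x0B;	/* bit 2 missing        -> sparse path */

	printf("0x0F dense? %d\n", (dense & (dense + 1)) == 0);	/* prints 1 */
	printf("0x0B dense? %d\n", (sparse & (sparse + 1)) == 0);	/* prints 0 */
	return 0;
}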
+
+#define PIPELINE_TABLE_AH_MISS(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_in_mask, \
+ struct rte_pipeline_table_entry *entry, \
+ void *arg) \
+{ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], entry, arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], entry, arg); \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ f_pkt_work(pkts[pos], entry, arg); \
+ } \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_HIT_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ &entries[i], arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], \
+ entries[i], arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entries[pos], arg, time); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
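/* In the _DROP_TIME handlers the work functions receive the burst timestamp
 * (rte_rdtsc()) and return a drop mask; the handler XORs that mask into
 * pkts_out_mask, so the final pkts_out_mask ^ pkts_mask is exactly the set
 * of packets whose bit was flipped, which is what gets dropped. A hedged
 * sketch of a matching single-packet work function (the name and the expiry
 * argument are illustrative): */
static inline uint64_t
pkt_work_expire(struct rte_mbuf *pkt,
	struct rte_pipeline_table_entry *entry,
	void *arg,
	uint64_t time)
{
	uint64_t *deadline_tsc = arg;	/* illustrative argument */

	(void) pkt;
	(void) entry;
	return (time > *deadline_tsc) ? 1 : 0;	/* 1 = drop this packet */
}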
+
+#define PIPELINE_TABLE_AH_MISS_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry *entry, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ entry, arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], entry, arg, time);\
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entry, arg, time); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_be.c b/examples/ip_pipeline/pipeline/pipeline_common_be.c
new file mode 100644
index 00000000..50dcb694
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_common_be.c
@@ -0,0 +1,206 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+
+#include "pipeline_common_be.h"
+
+void *
+pipeline_msg_req_ping_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_msg_rsp *rsp = msg;
+
+ rsp->status = 0; /* OK */
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_port_in_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_port_in_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_port_out_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_port_out_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->id >= p->n_ports_out) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_out_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_table_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_table_msg_rsp *rsp = msg;
+ uint32_t table_id;
+
+ /* Check request */
+ if (req->id >= p->n_tables) {
+ rsp->status = -1;
+ return rsp;
+ }
+ table_id = p->table_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_table_stats_read(p->p,
+ table_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_port_in_enable_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_port_in_msg_req *req = msg;
+ struct pipeline_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->port_id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->port_id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_enable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_port_in_disable_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_port_in_msg_req *req = msg;
+ struct pipeline_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->port_id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->port_id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_disable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_invalid_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_msg_rsp *rsp = msg;
+
+ rsp->status = -1; /* Error */
+
+ return rsp;
+}
+
+int
+pipeline_msg_req_handle(struct pipeline *p)
+{
+ uint32_t msgq_id;
+
+ for (msgq_id = 0; msgq_id < p->n_msgq; msgq_id++) {
+ for ( ; ; ) {
+ struct pipeline_msg_req *req;
+ pipeline_msg_req_handler f_handle;
+
+ req = pipeline_msg_recv(p, msgq_id);
+ if (req == NULL)
+ break;
+
+ f_handle = (req->type < PIPELINE_MSG_REQS) ?
+ p->handlers[req->type] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ pipeline_msg_send(p,
+ msgq_id,
+ f_handle(p, (void *) req));
+ }
+ }
+
+ return 0;
+}
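/* pipeline_msg_req_handle() is the back-end half of the control path: it
 * drains every request ring, dispatches on the request type (falling back to
 * the invalid handler for unknown or unregistered types) and enqueues the
 * handler's response, which reuses the same message buffer. A back-end
 * pipeline would typically wire the common handlers into its handler table
 * at init time; a hedged sketch (the table name is illustrative, and a real
 * pipeline also installs its own PIPELINE_MSG_REQ_CUSTOM handler): */
static pipeline_msg_req_handler example_handlers[] = {
	[PIPELINE_MSG_REQ_PING] =
		pipeline_msg_req_ping_handler,
	[PIPELINE_MSG_REQ_STATS_PORT_IN] =
		pipeline_msg_req_stats_port_in_handler,
	[PIPELINE_MSG_REQ_STATS_PORT_OUT] =
		pipeline_msg_req_stats_port_out_handler,
	[PIPELINE_MSG_REQ_STATS_TABLE] =
		pipeline_msg_req_stats_table_handler,
	[PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
		pipeline_msg_req_port_in_enable_handler,
	[PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
		pipeline_msg_req_port_in_disable_handler,
	[PIPELINE_MSG_REQ_CUSTOM] =
		pipeline_msg_req_invalid_handler,	/* placeholder */
};
/* ... and then copy it into p->handlers during the pipeline's back-end init,
 * e.g. memcpy(p->handlers, example_handlers, sizeof(example_handlers)). */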
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_be.h b/examples/ip_pipeline/pipeline/pipeline_common_be.h
new file mode 100644
index 00000000..07fdca09
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_common_be.h
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_COMMON_BE_H__
+#define __INCLUDE_PIPELINE_COMMON_BE_H__
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_be.h"
+
+struct pipeline;
+
+enum pipeline_msg_req_type {
+ PIPELINE_MSG_REQ_PING = 0,
+ PIPELINE_MSG_REQ_STATS_PORT_IN,
+ PIPELINE_MSG_REQ_STATS_PORT_OUT,
+ PIPELINE_MSG_REQ_STATS_TABLE,
+ PIPELINE_MSG_REQ_PORT_IN_ENABLE,
+ PIPELINE_MSG_REQ_PORT_IN_DISABLE,
+ PIPELINE_MSG_REQ_CUSTOM,
+ PIPELINE_MSG_REQS
+};
+
+typedef void *(*pipeline_msg_req_handler)(struct pipeline *p, void *msg);
+
+struct pipeline {
+ struct rte_pipeline *p;
+ uint32_t port_in_id[PIPELINE_MAX_PORT_IN];
+ uint32_t port_out_id[PIPELINE_MAX_PORT_OUT];
+ uint32_t table_id[PIPELINE_MAX_TABLES];
+ struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
+ struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
+
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_tables;
+ uint32_t n_msgq;
+
+ pipeline_msg_req_handler handlers[PIPELINE_MSG_REQS];
+ char name[PIPELINE_NAME_SIZE];
+ uint32_t log_level;
+};
+
+enum pipeline_log_level {
+ PIPELINE_LOG_LEVEL_HIGH = 1,
+ PIPELINE_LOG_LEVEL_LOW,
+ PIPELINE_LOG_LEVELS
+};
+
+#define PLOG(p, level, fmt, ...) \
+do { \
+ if (p->log_level >= PIPELINE_LOG_LEVEL_ ## level) \
+ fprintf(stdout, "[%s] " fmt "\n", p->name, ## __VA_ARGS__);\
+} while (0)
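/* PLOG() prefixes every message with the pipeline name and filters on the
 * pipeline's configured log level; the level token is pasted onto
 * PIPELINE_LOG_LEVEL_. Illustrative use (the format string is an example
 * only):
 *
 *	PLOG(p, HIGH, "%u input ports, %u output ports",
 *		p->n_ports_in, p->n_ports_out);
 */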
+
+static inline void *
+pipeline_msg_recv(struct pipeline *p,
+ uint32_t msgq_id)
+{
+ struct rte_ring *r = p->msgq_in[msgq_id];
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void
+pipeline_msg_send(struct pipeline *p,
+ uint32_t msgq_id,
+ void *msg)
+{
+ struct rte_ring *r = p->msgq_out[msgq_id];
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+struct pipeline_msg_req {
+ enum pipeline_msg_req_type type;
+};
+
+struct pipeline_stats_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t id;
+};
+
+struct pipeline_port_in_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t port_id;
+};
+
+struct pipeline_custom_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t subtype;
+};
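/* Pipeline-specific requests keep this type/subtype prefix so the common
 * dispatcher can route them and the handler can reuse the buffer for the
 * response. A hedged sketch of such a pair (names and payload illustrative): */
struct example_custom_msg_req {
	enum pipeline_msg_req_type type;	/* = PIPELINE_MSG_REQ_CUSTOM */
	uint32_t subtype;			/* pipeline-defined operation */
	uint32_t parameter;			/* request payload */
};

struct example_custom_msg_rsp {
	int status;				/* 0 on success, < 0 on error */
};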
+
+struct pipeline_msg_rsp {
+ int status;
+};
+
+struct pipeline_stats_port_in_msg_rsp {
+ int status;
+ struct rte_pipeline_port_in_stats stats;
+};
+
+struct pipeline_stats_port_out_msg_rsp {
+ int status;
+ struct rte_pipeline_port_out_stats stats;
+};
+
+struct pipeline_stats_table_msg_rsp {
+ int status;
+ struct rte_pipeline_table_stats stats;
+};
+
+void *pipeline_msg_req_ping_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_port_in_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_port_out_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_table_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_port_in_enable_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_port_in_disable_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_invalid_handler(struct pipeline *p, void *msg);
+
+int pipeline_msg_req_handle(struct pipeline *p);
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.c b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
new file mode 100644
index 00000000..a691d422
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
@@ -0,0 +1,1310 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "pipeline_common_fe.h"
+
+int
+app_pipeline_ping(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PING;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
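/* app_pipeline_ping() shows the front-end request pattern used throughout
 * this file: allocate a message buffer, fill in the request, block in
 * app_msg_send_recv() for up to MSG_TIMEOUT_DEFAULT milliseconds, read the
 * status from the response (same buffer) and free it. Illustrative caller
 * (the function name is not part of this patch): */
static void
check_pipeline_alive(struct app_params *app, uint32_t pipeline_id)
{
	if (app_pipeline_ping(app, pipeline_id) != 0)
		printf("pipeline %u did not answer the ping\n", pipeline_id);
}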
+
+int
+app_pipeline_stats_port_in(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_port_in_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_PORT_IN;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = (struct pipeline_stats_port_in_msg_rsp *)
+ app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_stats_port_out(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_port_out_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (pipeline_id >= app->n_pipelines) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_out))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_PORT_OUT;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_stats_table(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_table_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_TABLE;
+ req->id = table_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_port_in_enable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_port_in_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PORT_IN_ENABLE;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_port_in_disable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_port_in_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PORT_IN_DISABLE;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_link_config(struct app_params *app,
+ uint32_t link_id,
+ uint32_t ip,
+ uint32_t depth)
+{
+ struct app_link_params *p;
+ uint32_t i, netmask, host, bcast;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ if (p->state) {
+ APP_LOG(app, HIGH, "%s is UP, please bring it DOWN first",
+ p->name);
+ return -1;
+ }
+
+ netmask = (~0U) << (32 - depth);
+ host = ip & netmask;
+ bcast = host | (~netmask);
+
+ if ((ip == 0) ||
+ (ip == UINT32_MAX) ||
+ (ip == host) ||
+ (ip == bcast)) {
+ APP_LOG(app, HIGH, "Illegal IP address");
+ return -1;
+ }
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+
+ if (strcmp(p->name, link->name) == 0)
+ continue;
+
+ if (link->ip == ip) {
+ APP_LOG(app, HIGH,
+ "%s is already assigned this IP address",
+ link->name);
+ return -1;
+ }
+ }
+
+ if ((depth == 0) || (depth > 32)) {
+ APP_LOG(app, HIGH, "Illegal value for depth parameter "
+ "(%" PRIu32 ")",
+ depth);
+ return -1;
+ }
+
+ /* Save link parameters */
+ p->ip = ip;
+ p->depth = depth;
+
+ return 0;
+}
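/* Worked example for the netmask/host/broadcast computation above, using the
 * illustrative address 192.168.0.1/24:
 *	ip      = 0xC0A80001			(192.168.0.1)
 *	netmask = ~0U << (32 - 24) = 0xFFFFFF00	(255.255.255.0)
 *	host    = ip & netmask     = 0xC0A80000	(192.168.0.0, rejected as a link IP)
 *	bcast   = host | ~netmask  = 0xC0A800FF	(192.168.0.255, rejected as a link IP)
 * 192.168.0.1 is neither the network nor the broadcast address, so it is
 * accepted and stored on the link. */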
+
+int
+app_link_up(struct app_params *app,
+ uint32_t link_id)
+{
+ struct app_link_params *p;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ /* Check link state */
+ if (p->state) {
+ APP_LOG(app, HIGH, "%s is already UP", p->name);
+ return 0;
+ }
+
+ /* Check that IP address is valid */
+ if (p->ip == 0) {
+ APP_LOG(app, HIGH, "%s IP address is not set", p->name);
+ return 0;
+ }
+
+ app_link_up_internal(app, p);
+
+ return 0;
+}
+
+int
+app_link_down(struct app_params *app,
+ uint32_t link_id)
+{
+ struct app_link_params *p;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ /* Check link state */
+ if (p->state == 0) {
+ APP_LOG(app, HIGH, "%s is already DOWN", p->name);
+ return 0;
+ }
+
+ app_link_down_internal(app, p);
+
+ return 0;
+}
+
+/*
+ * ping
+ */
+
+struct cmd_ping_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t ping_string;
+};
+
+static void
+cmd_ping_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_ping_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_ping(app, params->pipeline_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_ping_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_ping_result, p_string, "p");
+
+cmdline_parse_token_num_t cmd_ping_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_ping_result, pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_ping_ping_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_ping_result, ping_string, "ping");
+
+cmdline_parse_inst_t cmd_ping = {
+ .f = cmd_ping_parsed,
+ .data = NULL,
+ .help_str = "Pipeline ping",
+ .tokens = {
+ (void *) &cmd_ping_p_string,
+ (void *) &cmd_ping_pipeline_id,
+ (void *) &cmd_ping_ping_string,
+ NULL,
+ },
+};
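/* The token list above defines the CLI syntax for this command, e.g.
 * (the pipeline id is illustrative):
 *	p 1 ping
 */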
+
+/*
+ * stats port in
+ */
+
+struct cmd_stats_port_in_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+};
+
+static void
+cmd_stats_port_in_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_stats_port_in_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_port_in_stats stats;
+ int status;
+
+ status = app_pipeline_stats_port_in(app,
+ params->pipeline_id,
+ params->port_in_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for input port %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts dropped by AH: %" PRIu64 "\n"
+ "\tPkts dropped by other: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->port_in_id,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+cmdline_parse_token_string_t cmd_stats_port_in_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, stats_string,
+ "stats");
+
+cmdline_parse_token_string_t cmd_stats_port_in_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_stats_port_in_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, in_string,
+ "in");
+
+ cmdline_parse_token_num_t cmd_stats_port_in_port_in_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, port_in_id,
+ UINT32);
+
+cmdline_parse_inst_t cmd_stats_port_in = {
+ .f = cmd_stats_port_in_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port stats",
+ .tokens = {
+ (void *) &cmd_stats_port_in_p_string,
+ (void *) &cmd_stats_port_in_pipeline_id,
+ (void *) &cmd_stats_port_in_stats_string,
+ (void *) &cmd_stats_port_in_port_string,
+ (void *) &cmd_stats_port_in_in_string,
+ (void *) &cmd_stats_port_in_port_in_id,
+ NULL,
+ },
+};
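/* CLI syntax defined by the tokens above, e.g. (ids illustrative):
 *	p 1 stats port in 0
 * The output port and table stats commands below follow the same shape. */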
+
+/*
+ * stats port out
+ */
+
+struct cmd_stats_port_out_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t out_string;
+ uint32_t port_out_id;
+};
+
+static void
+cmd_stats_port_out_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_stats_port_out_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_port_out_stats stats;
+ int status;
+
+ status = app_pipeline_stats_port_out(app,
+ params->pipeline_id,
+ params->port_out_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for output port %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts dropped by AH: %" PRIu64 "\n"
+ "\tPkts dropped by other: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->port_out_id,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+cmdline_parse_token_string_t cmd_stats_port_out_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, stats_string,
+ "stats");
+
+cmdline_parse_token_string_t cmd_stats_port_out_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_stats_port_out_out_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, out_string,
+ "out");
+
+cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, port_out_id,
+ UINT32);
+
+cmdline_parse_inst_t cmd_stats_port_out = {
+ .f = cmd_stats_port_out_parsed,
+ .data = NULL,
+ .help_str = "Pipeline output port stats",
+ .tokens = {
+ (void *) &cmd_stats_port_out_p_string,
+ (void *) &cmd_stats_port_out_pipeline_id,
+ (void *) &cmd_stats_port_out_stats_string,
+ (void *) &cmd_stats_port_out_port_string,
+ (void *) &cmd_stats_port_out_out_string,
+ (void *) &cmd_stats_port_out_port_out_id,
+ NULL,
+ },
+};
+
+/*
+ * stats table
+ */
+
+struct cmd_stats_table_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t table_string;
+ uint32_t table_id;
+};
+
+static void
+cmd_stats_table_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_stats_table_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_table_stats stats;
+ int status;
+
+ status = app_pipeline_stats_table(app,
+ params->pipeline_id,
+ params->table_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for table %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts in with lookup miss: %" PRIu64 "\n"
+ "\tPkts in with lookup hit dropped by AH: %" PRIu64 "\n"
+ "\tPkts in with lookup hit dropped by others: %" PRIu64 "\n"
+ "\tPkts in with lookup miss dropped by AH: %" PRIu64 "\n"
+ "\tPkts in with lookup miss dropped by others: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->table_id,
+ stats.stats.n_pkts_in,
+ stats.stats.n_pkts_lookup_miss,
+ stats.n_pkts_dropped_by_lkp_hit_ah,
+ stats.n_pkts_dropped_lkp_hit,
+ stats.n_pkts_dropped_by_lkp_miss_ah,
+ stats.n_pkts_dropped_lkp_miss);
+}
+
+cmdline_parse_token_string_t cmd_stats_table_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_stats_table_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, stats_string,
+ "stats");
+
+cmdline_parse_token_string_t cmd_stats_table_table_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, table_string,
+ "table");
+
+cmdline_parse_token_num_t cmd_stats_table_table_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, table_id, UINT32);
+
+cmdline_parse_inst_t cmd_stats_table = {
+ .f = cmd_stats_table_parsed,
+ .data = NULL,
+ .help_str = "Pipeline table stats",
+ .tokens = {
+ (void *) &cmd_stats_table_p_string,
+ (void *) &cmd_stats_table_pipeline_id,
+ (void *) &cmd_stats_table_stats_string,
+ (void *) &cmd_stats_table_table_string,
+ (void *) &cmd_stats_table_table_id,
+ NULL,
+ },
+};
+
+/*
+ * port in enable
+ */
+
+struct cmd_port_in_enable_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+ cmdline_fixed_string_t enable_string;
+};
+
+static void
+cmd_port_in_enable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_port_in_enable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_port_in_enable(app,
+ params->pipeline_id,
+ params->port_in_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_port_in_enable_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_enable_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_port_in_enable_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, in_string,
+ "in");
+
+cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, port_in_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result,
+ enable_string, "enable");
+
+cmdline_parse_inst_t cmd_port_in_enable = {
+ .f = cmd_port_in_enable_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port enable",
+ .tokens = {
+ (void *) &cmd_port_in_enable_p_string,
+ (void *) &cmd_port_in_enable_pipeline_id,
+ (void *) &cmd_port_in_enable_port_string,
+ (void *) &cmd_port_in_enable_in_string,
+ (void *) &cmd_port_in_enable_port_in_id,
+ (void *) &cmd_port_in_enable_enable_string,
+ NULL,
+ },
+};
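/* CLI syntax defined by the tokens above, e.g. (ids illustrative):
 *	p 1 port in 0 enable
 */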
+
+/*
+ * port in disable
+ */
+
+struct cmd_port_in_disable_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+ cmdline_fixed_string_t disable_string;
+};
+
+static void
+cmd_port_in_disable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_port_in_disable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_port_in_disable(app,
+ params->pipeline_id,
+ params->port_in_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_port_in_disable_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_disable_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_port_in_disable_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, in_string,
+ "in");
+
+cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, port_in_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result,
+ disable_string, "disable");
+
+cmdline_parse_inst_t cmd_port_in_disable = {
+ .f = cmd_port_in_disable_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port disable",
+ .tokens = {
+ (void *) &cmd_port_in_disable_p_string,
+ (void *) &cmd_port_in_disable_pipeline_id,
+ (void *) &cmd_port_in_disable_port_string,
+ (void *) &cmd_port_in_disable_in_string,
+ (void *) &cmd_port_in_disable_port_in_id,
+ (void *) &cmd_port_in_disable_disable_string,
+ NULL,
+ },
+};
+
+/*
+ * link config
+ */
+
+static void
+print_link_info(struct app_link_params *p)
+{
+ struct rte_eth_stats stats;
+ struct ether_addr *mac_addr;
+ uint32_t netmask = (~0U) << (32 - p->depth);
+ uint32_t host = p->ip & netmask;
+ uint32_t bcast = host | (~netmask);
+
+ memset(&stats, 0, sizeof(stats));
+ rte_eth_stats_get(p->pmd_id, &stats);
+
+ mac_addr = (struct ether_addr *) &p->mac_addr;
+
+ if (strlen(p->pci_bdf))
+ printf("%s(%s): flags=<%s>\n",
+ p->name,
+ p->pci_bdf,
+ (p->state) ? "UP" : "DOWN");
+ else
+ printf("%s: flags=<%s>\n",
+ p->name,
+ (p->state) ? "UP" : "DOWN");
+
+ if (p->ip)
+ printf("\tinet %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32
+ " netmask %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 " "
+ "broadcast %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 "\n",
+ (p->ip >> 24) & 0xFF,
+ (p->ip >> 16) & 0xFF,
+ (p->ip >> 8) & 0xFF,
+ p->ip & 0xFF,
+ (netmask >> 24) & 0xFF,
+ (netmask >> 16) & 0xFF,
+ (netmask >> 8) & 0xFF,
+ netmask & 0xFF,
+ (bcast >> 24) & 0xFF,
+ (bcast >> 16) & 0xFF,
+ (bcast >> 8) & 0xFF,
+ bcast & 0xFF);
+
+ printf("\tether %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5]);
+
+ printf("\tRX packets %" PRIu64
+ " bytes %" PRIu64
+ "\n",
+ stats.ipackets,
+ stats.ibytes);
+
+ printf("\tRX errors %" PRIu64
+ " missed %" PRIu64
+ " no-mbuf %" PRIu64
+ "\n",
+ stats.ierrors,
+ stats.imissed,
+ stats.rx_nombuf);
+
+ printf("\tTX packets %" PRIu64
+ " bytes %" PRIu64 "\n",
+ stats.opackets,
+ stats.obytes);
+
+ printf("\tTX errors %" PRIu64
+ "\n",
+ stats.oerrors);
+
+ printf("\n");
+}
+
+struct cmd_link_config_result {
+ cmdline_fixed_string_t link_string;
+ uint32_t link_id;
+ cmdline_fixed_string_t config_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+};
+
+static void
+cmd_link_config_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_config_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ uint32_t link_id = params->link_id;
+ uint32_t ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ uint32_t depth = params->depth;
+
+ status = app_link_config(app, link_id, ip, depth);
+ if (status)
+ printf("Command failed\n");
+ else {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ print_link_info(p);
+ }
+}
+
+cmdline_parse_token_string_t cmd_link_config_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, link_string,
+ "link");
+
+cmdline_parse_token_num_t cmd_link_config_link_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, link_id, UINT32);
+
+cmdline_parse_token_string_t cmd_link_config_config_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, config_string,
+ "config");
+
+cmdline_parse_token_ipaddr_t cmd_link_config_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_link_config_result, ip);
+
+cmdline_parse_token_num_t cmd_link_config_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, depth, UINT32);
+
+cmdline_parse_inst_t cmd_link_config = {
+ .f = cmd_link_config_parsed,
+ .data = NULL,
+ .help_str = "Link configuration",
+ .tokens = {
+ (void *)&cmd_link_config_link_string,
+ (void *)&cmd_link_config_link_id,
+ (void *)&cmd_link_config_config_string,
+ (void *)&cmd_link_config_ip,
+ (void *)&cmd_link_config_depth,
+ NULL,
+ },
+};
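/* CLI syntax defined by the tokens above, e.g. (values illustrative):
 *	link 0 config 192.168.0.1 24
 * i.e. assign 192.168.0.1/24 to LINK0 and then print the link information. */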
+
+/*
+ * link up
+ */
+
+struct cmd_link_up_result {
+ cmdline_fixed_string_t link_string;
+ uint32_t link_id;
+ cmdline_fixed_string_t up_string;
+};
+
+static void
+cmd_link_up_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_up_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_link_up(app, params->link_id);
+ if (status != 0)
+ printf("Command failed\n");
+ else {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
+ p);
+ print_link_info(p);
+ }
+}
+
+cmdline_parse_token_string_t cmd_link_up_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, link_string,
+ "link");
+
+cmdline_parse_token_num_t cmd_link_up_link_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_up_result, link_id, UINT32);
+
+cmdline_parse_token_string_t cmd_link_up_up_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, up_string, "up");
+
+cmdline_parse_inst_t cmd_link_up = {
+ .f = cmd_link_up_parsed,
+ .data = NULL,
+ .help_str = "Link UP",
+ .tokens = {
+ (void *)&cmd_link_up_link_string,
+ (void *)&cmd_link_up_link_id,
+ (void *)&cmd_link_up_up_string,
+ NULL,
+ },
+};
+
+/*
+ * link down
+ */
+
+struct cmd_link_down_result {
+ cmdline_fixed_string_t link_string;
+ uint32_t link_id;
+ cmdline_fixed_string_t down_string;
+};
+
+static void
+cmd_link_down_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_down_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_link_down(app, params->link_id);
+ if (status != 0)
+ printf("Command failed\n");
+ else {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
+ p);
+ print_link_info(p);
+ }
+}
+
+cmdline_parse_token_string_t cmd_link_down_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, link_string,
+ "link");
+
+cmdline_parse_token_num_t cmd_link_down_link_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_down_result, link_id, UINT32);
+
+cmdline_parse_token_string_t cmd_link_down_down_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, down_string,
+ "down");
+
+cmdline_parse_inst_t cmd_link_down = {
+ .f = cmd_link_down_parsed,
+ .data = NULL,
+ .help_str = "Link DOWN",
+ .tokens = {
+ (void *) &cmd_link_down_link_string,
+ (void *) &cmd_link_down_link_id,
+ (void *) &cmd_link_down_down_string,
+ NULL,
+ },
+};
+
+/*
+ * link ls
+ */
+
+struct cmd_link_ls_result {
+ cmdline_fixed_string_t link_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_link_ls_parsed(
+ __attribute__((unused)) void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct app_params *app = data;
+ uint32_t link_id;
+
+ for (link_id = 0; link_id < app->n_links; link_id++) {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ print_link_info(p);
+ }
+}
+
+cmdline_parse_token_string_t cmd_link_ls_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, link_string,
+ "link");
+
+cmdline_parse_token_string_t cmd_link_ls_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, ls_string, "ls");
+
+cmdline_parse_inst_t cmd_link_ls = {
+ .f = cmd_link_ls_parsed,
+ .data = NULL,
+ .help_str = "Link list",
+ .tokens = {
+ (void *)&cmd_link_ls_link_string,
+ (void *)&cmd_link_ls_ls_string,
+ NULL,
+ },
+};
+
+/*
+ * quit
+ */
+
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+static void
+cmd_quit_parsed(
+ __rte_unused void *parsed_result,
+ struct cmdline *cl,
+ __rte_unused void *data)
+{
+ cmdline_quit(cl);
+}
+
+static cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+static cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed,
+ .data = NULL,
+ .help_str = "Quit",
+ .tokens = {
+ (void *) &cmd_quit_quit,
+ NULL,
+ },
+};
+
+/*
+ * run
+ */
+
+static void
+app_run_file(
+ cmdline_parse_ctx_t *ctx,
+ const char *file_name)
+{
+ struct cmdline *file_cl;
+ int fd;
+
+ fd = open(file_name, O_RDONLY);
+ if (fd < 0) {
+ printf("Cannot open file \"%s\"\n", file_name);
+ return;
+ }
+
+ file_cl = cmdline_new(ctx, "", fd, 1);
+ cmdline_interact(file_cl);
+ close(fd);
+}
+
+struct cmd_run_file_result {
+ cmdline_fixed_string_t run_string;
+ char file_name[APP_FILE_NAME_SIZE];
+};
+
+static void
+cmd_run_parsed(
+ void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_run_file_result *params = parsed_result;
+
+ app_run_file(cl->ctx, params->file_name);
+}
+
+cmdline_parse_token_string_t cmd_run_run_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, run_string,
+ "run");
+
+cmdline_parse_token_string_t cmd_run_file_name =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, file_name, NULL);
+
+cmdline_parse_inst_t cmd_run = {
+ .f = cmd_run_parsed,
+ .data = NULL,
+ .help_str = "Run CLI script file",
+ .tokens = {
+ (void *) &cmd_run_run_string,
+ (void *) &cmd_run_file_name,
+ NULL,
+ },
+};
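/* CLI syntax defined by the tokens above (the path is illustrative):
 *	run /path/to/commands.cli
 * The named file is replayed line by line through the same command parser. */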
+
+static cmdline_parse_ctx_t pipeline_common_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_quit,
+ (cmdline_parse_inst_t *) &cmd_run,
+
+ (cmdline_parse_inst_t *) &cmd_link_config,
+ (cmdline_parse_inst_t *) &cmd_link_up,
+ (cmdline_parse_inst_t *) &cmd_link_down,
+ (cmdline_parse_inst_t *) &cmd_link_ls,
+
+ (cmdline_parse_inst_t *) &cmd_ping,
+ (cmdline_parse_inst_t *) &cmd_stats_port_in,
+ (cmdline_parse_inst_t *) &cmd_stats_port_out,
+ (cmdline_parse_inst_t *) &cmd_stats_table,
+ (cmdline_parse_inst_t *) &cmd_port_in_enable,
+ (cmdline_parse_inst_t *) &cmd_port_in_disable,
+ NULL,
+};
+
+int
+app_pipeline_common_cmd_push(struct app_params *app)
+{
+ uint32_t n_cmds, i;
+
+ /* Check for available slots in the application commands array */
+ n_cmds = RTE_DIM(pipeline_common_cmds) - 1;
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push pipeline commands into the application */
+ memcpy(&app->cmds[app->n_cmds],
+ pipeline_common_cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.h b/examples/ip_pipeline/pipeline/pipeline_common_fe.h
new file mode 100644
index 00000000..cfad963d
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.h
@@ -0,0 +1,234 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_COMMON_FE_H__
+#define __INCLUDE_PIPELINE_COMMON_FE_H__
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <cmdline_parse.h>
+
+#include "pipeline_common_be.h"
+#include "pipeline.h"
+#include "app.h"
+
+#ifndef MSG_TIMEOUT_DEFAULT
+#define MSG_TIMEOUT_DEFAULT 1000
+#endif
+
+static inline struct app_pipeline_data *
+app_pipeline_data(struct app_params *app, uint32_t id)
+{
+ struct app_pipeline_params *params;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", id, params);
+ if (params == NULL)
+ return NULL;
+
+ return &app->pipeline_data[params - app->pipeline_params];
+}
+
+static inline void *
+app_pipeline_data_fe(struct app_params *app, uint32_t id, struct pipeline_type *ptype)
+{
+ struct app_pipeline_data *pipeline_data;
+
+ pipeline_data = app_pipeline_data(app, id);
+ if (pipeline_data == NULL)
+ return NULL;
+
+ if (strcmp(pipeline_data->ptype->name, ptype->name) != 0)
+ return NULL;
+
+ if (pipeline_data->enabled == 0)
+ return NULL;
+
+ return pipeline_data->fe;
+}
+
+static inline struct rte_ring *
+app_pipeline_msgq_in_get(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_msgq_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->msgq_params,
+ "MSGQ-REQ-PIPELINE",
+ pipeline_id,
+ p);
+ if (p == NULL)
+ return NULL;
+
+ return app->msgq[p - app->msgq_params];
+}
+
+static inline struct rte_ring *
+app_pipeline_msgq_out_get(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_msgq_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->msgq_params,
+ "MSGQ-RSP-PIPELINE",
+ pipeline_id,
+ p);
+ if (p == NULL)
+ return NULL;
+
+ return app->msgq[p - app->msgq_params];
+}
+
+static inline void *
+app_msg_alloc(__rte_unused struct app_params *app)
+{
+ return rte_malloc(NULL, 2048, RTE_CACHE_LINE_SIZE);
+}
+
+static inline void
+app_msg_free(__rte_unused struct app_params *app,
+ void *msg)
+{
+ rte_free(msg);
+}
+
+static inline void
+app_msg_send(struct app_params *app,
+ uint32_t pipeline_id,
+ void *msg)
+{
+ struct rte_ring *r = app_pipeline_msgq_in_get(app, pipeline_id);
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+static inline void *
+app_msg_recv(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct rte_ring *r = app_pipeline_msgq_out_get(app, pipeline_id);
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void *
+app_msg_send_recv(struct app_params *app,
+ uint32_t pipeline_id,
+ void *msg,
+ uint32_t timeout_ms)
+{
+ struct rte_ring *r_req = app_pipeline_msgq_in_get(app, pipeline_id);
+ struct rte_ring *r_rsp = app_pipeline_msgq_out_get(app, pipeline_id);
+ uint64_t hz = rte_get_tsc_hz();
+ void *msg_recv;
+ uint64_t deadline;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(r_req, (void *) msg);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ deadline = (timeout_ms) ?
+ (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
+ UINT64_MAX;
+
+ do {
+ if (rte_rdtsc() > deadline)
+ return NULL;
+
+ status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
+ } while (status != 0);
+
+ return msg_recv;
+}
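/* Worked example for the deadline computation above (values illustrative):
 * with a 2 GHz TSC (hz = 2,000,000,000) and timeout_ms = 1000,
 *	deadline = rte_rdtsc() + (2,000,000,000 * 1000) / 1000
 *	         = rte_rdtsc() + 2,000,000,000 cycles (one second from now).
 * A timeout_ms of 0 selects UINT64_MAX, i.e. wait for the response forever. */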
+
+int
+app_pipeline_ping(struct app_params *app,
+ uint32_t pipeline_id);
+
+int
+app_pipeline_stats_port_in(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats);
+
+int
+app_pipeline_stats_port_out(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats);
+
+int
+app_pipeline_stats_table(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats);
+
+int
+app_pipeline_port_in_enable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_port_in_disable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_link_config(struct app_params *app,
+ uint32_t link_id,
+ uint32_t ip,
+ uint32_t depth);
+
+int
+app_link_up(struct app_params *app,
+ uint32_t link_id);
+
+int
+app_link_down(struct app_params *app,
+ uint32_t link_id);
+
+int
+app_pipeline_common_cmd_push(struct app_params *app);
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.c b/examples/ip_pipeline/pipeline/pipeline_firewall.c
new file mode 100644
index 00000000..fd897d5c
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall.c
@@ -0,0 +1,1869 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+#include <cmdline_socket.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_firewall.h"
+
+#define BUF_SIZE 1024
+
+struct app_pipeline_firewall_rule {
+ struct pipeline_firewall_key key;
+ int32_t priority;
+ uint32_t port_id;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_firewall_rule) node;
+};
+
+struct app_pipeline_firewall {
+ /* parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+
+ /* rules */
+ TAILQ_HEAD(, app_pipeline_firewall_rule) rules;
+ uint32_t n_rules;
+ uint32_t default_rule_present;
+ uint32_t default_rule_port_id;
+ void *default_rule_entry_ptr;
+};
+
+struct app_pipeline_add_bulk_params {
+ struct pipeline_firewall_key *keys;
+ uint32_t n_keys;
+ uint32_t *priorities;
+ uint32_t *port_ids;
+};
+
+struct app_pipeline_del_bulk_params {
+ struct pipeline_firewall_key *keys;
+ uint32_t n_keys;
+};
+
+static void
+print_firewall_ipv4_rule(struct app_pipeline_firewall_rule *rule)
+{
+ printf("Prio = %" PRId32 " (SA = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 "/%" PRIu32 ", "
+ "DA = %" PRIu32 ".%" PRIu32
+ ".%"PRIu32 ".%" PRIu32 "/%" PRIu32 ", "
+ "SP = %" PRIu32 "-%" PRIu32 ", "
+ "DP = %" PRIu32 "-%" PRIu32 ", "
+ "Proto = %" PRIu32 " / 0x%" PRIx32 ") => "
+ "Port = %" PRIu32 " (entry ptr = %p)\n",
+
+ rule->priority,
+
+ (rule->key.key.ipv4_5tuple.src_ip >> 24) & 0xFF,
+ (rule->key.key.ipv4_5tuple.src_ip >> 16) & 0xFF,
+ (rule->key.key.ipv4_5tuple.src_ip >> 8) & 0xFF,
+ rule->key.key.ipv4_5tuple.src_ip & 0xFF,
+ rule->key.key.ipv4_5tuple.src_ip_mask,
+
+ (rule->key.key.ipv4_5tuple.dst_ip >> 24) & 0xFF,
+ (rule->key.key.ipv4_5tuple.dst_ip >> 16) & 0xFF,
+ (rule->key.key.ipv4_5tuple.dst_ip >> 8) & 0xFF,
+ rule->key.key.ipv4_5tuple.dst_ip & 0xFF,
+ rule->key.key.ipv4_5tuple.dst_ip_mask,
+
+ rule->key.key.ipv4_5tuple.src_port_from,
+ rule->key.key.ipv4_5tuple.src_port_to,
+
+ rule->key.key.ipv4_5tuple.dst_port_from,
+ rule->key.key.ipv4_5tuple.dst_port_to,
+
+ rule->key.key.ipv4_5tuple.proto,
+ rule->key.key.ipv4_5tuple.proto_mask,
+
+ rule->port_id,
+ rule->entry_ptr);
+}
+
+static struct app_pipeline_firewall_rule *
+app_pipeline_firewall_rule_find(struct app_pipeline_firewall *p,
+ struct pipeline_firewall_key *key)
+{
+ struct app_pipeline_firewall_rule *r;
+
+ TAILQ_FOREACH(r, &p->rules, node)
+ if (memcmp(key,
+ &r->key,
+ sizeof(struct pipeline_firewall_key)) == 0)
+ return r;
+
+ return NULL;
+}
+
+static int
+app_pipeline_firewall_ls(
+ struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_firewall *p;
+ struct app_pipeline_firewall_rule *rule;
+ uint32_t n_rules;
+ int priority;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ n_rules = p->n_rules;
+ for (priority = 0; n_rules; priority++)
+ TAILQ_FOREACH(rule, &p->rules, node)
+ if (rule->priority == priority) {
+ print_firewall_ipv4_rule(rule);
+ n_rules--;
+ }
+
+ if (p->default_rule_present)
+ printf("Default rule: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_rule_port_id,
+ p->default_rule_entry_ptr);
+ else
+ printf("Default rule: DROP\n");
+
+ printf("\n");
+
+ return 0;
+}
+
+static void*
+app_pipeline_firewall_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct app_pipeline_firewall *p;
+ uint32_t size;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_firewall));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+
+ TAILQ_INIT(&p->rules);
+ p->n_rules = 0;
+ p->default_rule_present = 0;
+ p->default_rule_port_id = 0;
+ p->default_rule_entry_ptr = NULL;
+
+ return (void *) p;
+}
+
+static int
+app_pipeline_firewall_free(void *pipeline)
+{
+ struct app_pipeline_firewall *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ while (!TAILQ_EMPTY(&p->rules)) {
+ struct app_pipeline_firewall_rule *rule;
+
+ rule = TAILQ_FIRST(&p->rules);
+ TAILQ_REMOVE(&p->rules, rule, node);
+ rte_free(rule);
+ }
+
+ rte_free(p);
+ return 0;
+}
+
+static int
+app_pipeline_firewall_key_check_and_normalize(struct pipeline_firewall_key *key)
+{
+ switch (key->type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ {
+ uint32_t src_ip_depth = key->key.ipv4_5tuple.src_ip_mask;
+ uint32_t dst_ip_depth = key->key.ipv4_5tuple.dst_ip_mask;
+ uint16_t src_port_from = key->key.ipv4_5tuple.src_port_from;
+ uint16_t src_port_to = key->key.ipv4_5tuple.src_port_to;
+ uint16_t dst_port_from = key->key.ipv4_5tuple.dst_port_from;
+ uint16_t dst_port_to = key->key.ipv4_5tuple.dst_port_to;
+
+ uint32_t src_ip_netmask = 0;
+ uint32_t dst_ip_netmask = 0;
+
+ if ((src_ip_depth > 32) ||
+ (dst_ip_depth > 32) ||
+ (src_port_from > src_port_to) ||
+ (dst_port_from > dst_port_to))
+ return -1;
+
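+		/*
+		 * Convert the CIDR depth into a netmask and clear the host
+		 * bits of the address. Illustrative example: a depth of 24
+		 * gives netmask 0xFFFFFF00, so 10.0.0.7/24 is normalized
+		 * to 10.0.0.0/24.
+		 */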
+ if (src_ip_depth)
+ src_ip_netmask = (~0U) << (32 - src_ip_depth);
+
+ if (dst_ip_depth)
+ dst_ip_netmask = ((~0U) << (32 - dst_ip_depth));
+
+ key->key.ipv4_5tuple.src_ip &= src_ip_netmask;
+ key->key.ipv4_5tuple.dst_ip &= dst_ip_netmask;
+
+ return 0;
+ }
+
+ default:
+ return -1;
+ }
+}
+
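+/*
+ * Parse a bulk "firewall add" rule file. Each line is expected to hold the
+ * following space-separated fields, as consumed by the loop below:
+ *   <priority> <src_ip> <src_mask> <dst_ip> <dst_mask>
+ *   <sport_from> <sport_to> <dport_from> <dport_to> <proto> 0x<proto_mask>
+ *   <port_id>
+ * Illustrative example line: 1 10.0.0.0 24 192.168.1.0 24 0 65535 0 65535 6 0xF 0
+ */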
+static int
+app_pipeline_add_bulk_parse_file(char *filename,
+ struct app_pipeline_add_bulk_params *params)
+{
+ FILE *f;
+ char file_buf[BUF_SIZE];
+ uint32_t i;
+ int status = 0;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+
+ params->n_keys = 0;
+ while (fgets(file_buf, BUF_SIZE, f) != NULL)
+ params->n_keys++;
+ rewind(f);
+
+ if (params->n_keys == 0) {
+ status = -1;
+ goto end;
+ }
+
+ params->keys = rte_malloc(NULL,
+ params->n_keys * sizeof(struct pipeline_firewall_key),
+ RTE_CACHE_LINE_SIZE);
+ if (params->keys == NULL) {
+ status = -1;
+ goto end;
+ }
+
+ params->priorities = rte_malloc(NULL,
+ params->n_keys * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (params->priorities == NULL) {
+ status = -1;
+ goto end;
+ }
+
+ params->port_ids = rte_malloc(NULL,
+ params->n_keys * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (params->port_ids == NULL) {
+ status = -1;
+ goto end;
+ }
+
+ i = 0;
+ while (fgets(file_buf, BUF_SIZE, f) != NULL) {
+ char *str;
+
+ str = strtok(file_buf, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->priorities[i] = atoi(str);
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip = atoi(str)<<24;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<16;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<8;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip_mask = atoi(str);
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip = atoi(str)<<24;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<16;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<8;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip_mask = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_port_from = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_port_to = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_port_from = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_port_to = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.proto = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+		/* Skip the leading "0x" before parsing the hex protocol mask */
+ params->keys[i].key.ipv4_5tuple.proto_mask = strtol(str+2, NULL, 16);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->port_ids[i] = atoi(str);
+ params->keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+
+ i++;
+ }
+
+end:
+ fclose(f);
+ return status;
+}
+
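+/*
+ * Parse a bulk "firewall del" rule file. Same line format as the bulk add
+ * file, minus the leading priority and the trailing port ID:
+ *   <src_ip> <src_mask> <dst_ip> <dst_mask> <sport_from> <sport_to>
+ *   <dport_from> <dport_to> <proto> 0x<proto_mask>
+ */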
+static int
+app_pipeline_del_bulk_parse_file(char *filename,
+ struct app_pipeline_del_bulk_params *params)
+{
+ FILE *f;
+ char file_buf[BUF_SIZE];
+ uint32_t i;
+ int status = 0;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+
+ params->n_keys = 0;
+ while (fgets(file_buf, BUF_SIZE, f) != NULL)
+ params->n_keys++;
+ rewind(f);
+
+ if (params->n_keys == 0) {
+ status = -1;
+ goto end;
+ }
+
+ params->keys = rte_malloc(NULL,
+ params->n_keys * sizeof(struct pipeline_firewall_key),
+ RTE_CACHE_LINE_SIZE);
+ if (params->keys == NULL) {
+ status = -1;
+ goto end;
+ }
+
+ i = 0;
+ while (fgets(file_buf, BUF_SIZE, f) != NULL) {
+ char *str;
+
+ str = strtok(file_buf, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip = atoi(str)<<24;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<16;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<8;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_ip_mask = atoi(str);
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip = atoi(str)<<24;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<16;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<8;
+
+ str = strtok(NULL, " .");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_ip_mask = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_port_from = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.src_port_to = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_port_from = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.dst_port_to = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+ params->keys[i].key.ipv4_5tuple.proto = atoi(str);
+
+ str = strtok(NULL, " ");
+ if (str == NULL) {
+ status = -1;
+ goto end;
+ }
+		/* Skip the leading "0x" before parsing the hex protocol mask */
+ params->keys[i].key.ipv4_5tuple.proto_mask = strtol(str+2, NULL, 16);
+
+ params->keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+
+ i++;
+ }
+
+ for (i = 0; i < params->n_keys; i++) {
+ if (app_pipeline_firewall_key_check_and_normalize(&params->keys[i]) != 0) {
+ status = -1;
+ goto end;
+ }
+ }
+
+end:
+ fclose(f);
+ return status;
+}
+
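+/*
+ * Rule management entry points. Each function below keeps the front-end
+ * shadow copy of the rule set (struct app_pipeline_firewall) in sync with
+ * the firewall back-end: it builds a request with app_msg_alloc(), sends it
+ * with app_msg_send_recv(), commits the local change only after a
+ * successful response, and releases the response with app_msg_free().
+ */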
+int
+app_pipeline_firewall_add_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key,
+ uint32_t priority,
+ uint32_t port_id)
+{
+ struct app_pipeline_firewall *p;
+ struct app_pipeline_firewall_rule *rule;
+ struct pipeline_firewall_add_msg_req *req;
+ struct pipeline_firewall_add_msg_rsp *rsp;
+ int new_rule;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (key->type != PIPELINE_FIREWALL_IPV4_5TUPLE))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ if (app_pipeline_firewall_key_check_and_normalize(key) != 0)
+ return -1;
+
+ /* Find existing rule or allocate new rule */
+ rule = app_pipeline_firewall_rule_find(p, key);
+ new_rule = (rule == NULL);
+ if (rule == NULL) {
+ rule = rte_malloc(NULL, sizeof(*rule), RTE_CACHE_LINE_SIZE);
+
+ if (rule == NULL)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ if (new_rule)
+ rte_free(rule);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD;
+ memcpy(&req->key, key, sizeof(*key));
+ req->priority = priority;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_rule)
+ rte_free(rule);
+ return -1;
+ }
+
+ /* Read response and write rule */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_rule == 0) && (rsp->key_found == 0)) ||
+ ((new_rule == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_rule)
+ rte_free(rule);
+ return -1;
+ }
+
+ memcpy(&rule->key, key, sizeof(*key));
+ rule->priority = priority;
+ rule->port_id = port_id;
+ rule->entry_ptr = rsp->entry_ptr;
+
+ /* Commit rule */
+ if (new_rule) {
+ TAILQ_INSERT_TAIL(&p->rules, rule, node);
+ p->n_rules++;
+ }
+
+ print_firewall_ipv4_rule(rule);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_firewall_delete_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key)
+{
+ struct app_pipeline_firewall *p;
+ struct app_pipeline_firewall_rule *rule;
+ struct pipeline_firewall_del_msg_req *req;
+ struct pipeline_firewall_del_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (key->type != PIPELINE_FIREWALL_IPV4_5TUPLE))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ if (app_pipeline_firewall_key_check_and_normalize(key) != 0)
+ return -1;
+
+ /* Find rule */
+ rule = app_pipeline_firewall_rule_find(p, key);
+ if (rule == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL;
+ memcpy(&req->key, key, sizeof(*key));
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove rule */
+ TAILQ_REMOVE(&p->rules, rule, node);
+ p->n_rules--;
+ rte_free(rule);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_firewall_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys,
+ uint32_t *priorities,
+ uint32_t *port_ids)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_add_bulk_msg_req *req;
+ struct pipeline_firewall_add_bulk_msg_rsp *rsp;
+
+ struct app_pipeline_firewall_rule **rules;
+ int *new_rules;
+
+ int *keys_found;
+ void **entries_ptr;
+
+ uint32_t i;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ rules = rte_malloc(NULL,
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
+ if (rules == NULL)
+ return -1;
+
+ new_rules = rte_malloc(NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (new_rules == NULL) {
+ rte_free(rules);
+ return -1;
+ }
+
+	/* Check data integrity and find/allocate the rule entries */
+ for (i = 0; i < n_keys; i++) {
+ if (port_ids[i] >= p->n_ports_out) {
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+
+ if (app_pipeline_firewall_key_check_and_normalize(&keys[i]) != 0) {
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+
+ rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
+ new_rules[i] = (rules[i] == NULL);
+ if (rules[i] == NULL) {
+ rules[i] = rte_malloc(NULL, sizeof(*rules[i]),
+ RTE_CACHE_LINE_SIZE);
+
+ if (rules[i] == NULL) {
+ uint32_t j;
+
+ for (j = 0; j <= i; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+ }
+ }
+
+ keys_found = rte_malloc(NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (keys_found == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+
+ entries_ptr = rte_malloc(NULL,
+ n_keys * sizeof(struct rte_pipeline_table_entry *),
+ RTE_CACHE_LINE_SIZE);
+ if (entries_ptr == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ return -1;
+ }
+ for (i = 0; i < n_keys; i++) {
+ entries_ptr[i] = rte_malloc(NULL,
+ sizeof(struct rte_pipeline_table_entry),
+ RTE_CACHE_LINE_SIZE);
+
+ if (entries_ptr[i] == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ for (j = 0; j <= i; j++)
+ rte_free(entries_ptr[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+ return -1;
+ }
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ for (j = 0; j < n_keys; j++)
+ rte_free(entries_ptr[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD_BULK;
+
+ req->keys = keys;
+ req->n_keys = n_keys;
+ req->port_ids = port_ids;
+ req->priorities = priorities;
+ req->keys_found = keys_found;
+ req->entries_ptr = entries_ptr;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ for (j = 0; j < n_keys; j++)
+ rte_free(entries_ptr[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+ return -1;
+ }
+
+ if (rsp->status) {
+ for (i = 0; i < n_keys; i++)
+ if (new_rules[i])
+ rte_free(rules[i]);
+
+ for (i = 0; i < n_keys; i++)
+ rte_free(entries_ptr[i]);
+
+ status = -1;
+ goto cleanup;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ if (entries_ptr[i] == NULL ||
+ ((new_rules[i] == 0) && (keys_found[i] == 0)) ||
+ ((new_rules[i] == 1) && (keys_found[i] == 1))) {
+ for (i = 0; i < n_keys; i++)
+ if (new_rules[i])
+ rte_free(rules[i]);
+
+ for (i = 0; i < n_keys; i++)
+ rte_free(entries_ptr[i]);
+
+ status = -1;
+ goto cleanup;
+ }
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ memcpy(&rules[i]->key, &keys[i], sizeof(keys[i]));
+ rules[i]->priority = priorities[i];
+ rules[i]->port_id = port_ids[i];
+ rules[i]->entry_ptr = entries_ptr[i];
+
+ /* Commit rule */
+ if (new_rules[i]) {
+ TAILQ_INSERT_TAIL(&p->rules, rules[i], node);
+ p->n_rules++;
+ }
+
+ print_firewall_ipv4_rule(rules[i]);
+ }
+
+cleanup:
+ app_msg_free(app, rsp);
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+
+ return status;
+}
+
+int
+app_pipeline_firewall_delete_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_del_bulk_msg_req *req;
+ struct pipeline_firewall_del_bulk_msg_rsp *rsp;
+
+ struct app_pipeline_firewall_rule **rules;
+ int *keys_found;
+
+ uint32_t i;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ rules = rte_malloc(NULL,
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
+ if (rules == NULL)
+ return -1;
+
+ for (i = 0; i < n_keys; i++) {
+		if (app_pipeline_firewall_key_check_and_normalize(&keys[i]) != 0) {
+			rte_free(rules);
+			return -1;
+		}
+
+ rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
+ }
+
+ keys_found = rte_malloc(NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (keys_found == NULL) {
+ rte_free(rules);
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ rte_free(rules);
+ rte_free(keys_found);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL_BULK;
+
+ req->keys = keys;
+ req->n_keys = n_keys;
+ req->keys_found = keys_found;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ rte_free(rules);
+ rte_free(keys_found);
+ return -1;
+ }
+
+ if (rsp->status) {
+ status = -1;
+ goto cleanup;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ if (keys_found[i] == 0) {
+ status = -1;
+ goto cleanup;
+ }
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ TAILQ_REMOVE(&p->rules, rules[i], node);
+ p->n_rules--;
+ rte_free(rules[i]);
+ }
+
+cleanup:
+ app_msg_free(app, rsp);
+ rte_free(rules);
+ rte_free(keys_found);
+
+ return status;
+}
+
+int
+app_pipeline_firewall_add_default_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_add_default_msg_req *req;
+ struct pipeline_firewall_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write rule */
+ if (rsp->status || (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_rule_port_id = port_id;
+ p->default_rule_entry_ptr = rsp->entry_ptr;
+
+ /* Commit rule */
+ p->default_rule_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_firewall_delete_default_rule(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_del_default_msg_req *req;
+ struct pipeline_firewall_del_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+	/* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit rule */
+ p->default_rule_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * p firewall add ipv4
+ */
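+/*
+ * Full command syntax (assembled from the token list below):
+ * p <pipeline_id> firewall add ipv4 <priority> <src_ip> <src_ip_mask>
+ *   <dst_ip> <dst_ip_mask> <src_port_from> <src_port_to> <dst_port_from>
+ *   <dst_port_to> <proto> <proto_mask> <port_id>
+ */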
+
+struct cmd_firewall_add_ipv4_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t ipv4_string;
+ int32_t priority;
+ cmdline_ipaddr_t src_ip;
+ uint32_t src_ip_mask;
+ cmdline_ipaddr_t dst_ip;
+ uint32_t dst_ip_mask;
+ uint16_t src_port_from;
+ uint16_t src_port_to;
+ uint16_t dst_port_from;
+ uint16_t dst_port_to;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint8_t port_id;
+};
+
+static void
+cmd_firewall_add_ipv4_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_add_ipv4_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_firewall_key key;
+ int status;
+
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_bswap32(
+ (uint32_t) params->src_ip.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = params->src_ip_mask;
+ key.key.ipv4_5tuple.dst_ip = rte_bswap32(
+ (uint32_t) params->dst_ip.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = params->dst_ip_mask;
+ key.key.ipv4_5tuple.src_port_from = params->src_port_from;
+ key.key.ipv4_5tuple.src_port_to = params->src_port_to;
+ key.key.ipv4_5tuple.dst_port_from = params->dst_port_from;
+ key.key.ipv4_5tuple.dst_port_to = params->dst_port_to;
+ key.key.ipv4_5tuple.proto = params->proto;
+ key.key.ipv4_5tuple.proto_mask = params->proto_mask;
+
+ status = app_pipeline_firewall_add_rule(app,
+ params->pipeline_id,
+ &key,
+ params->priority,
+ params->port_id);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+cmdline_parse_token_string_t cmd_firewall_add_ipv4_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_firewall_add_ipv4_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ firewall_string, "firewall");
+
+cmdline_parse_token_string_t cmd_firewall_add_ipv4_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ add_string, "add");
+
+cmdline_parse_token_string_t cmd_firewall_add_ipv4_ipv4_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ ipv4_string, "ipv4");
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_priority =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, priority,
+ INT32);
+
+cmdline_parse_token_ipaddr_t cmd_firewall_add_ipv4_src_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_firewall_add_ipv4_result, src_ip);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_ip_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, src_ip_mask,
+ UINT32);
+
+cmdline_parse_token_ipaddr_t cmd_firewall_add_ipv4_dst_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_firewall_add_ipv4_result, dst_ip);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_ip_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, dst_ip_mask,
+ UINT32);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_port_from =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ src_port_from, UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_port_to =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ src_port_to, UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_port_from =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ dst_port_from, UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_port_to =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ dst_port_to, UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_proto =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ proto, UINT8);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_proto_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ proto_mask, UINT8);
+
+cmdline_parse_token_num_t cmd_firewall_add_ipv4_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_firewall_add_ipv4 = {
+ .f = cmd_firewall_add_ipv4_parsed,
+ .data = NULL,
+ .help_str = "Firewall rule add",
+ .tokens = {
+ (void *) &cmd_firewall_add_ipv4_p_string,
+ (void *) &cmd_firewall_add_ipv4_pipeline_id,
+ (void *) &cmd_firewall_add_ipv4_firewall_string,
+ (void *) &cmd_firewall_add_ipv4_add_string,
+ (void *) &cmd_firewall_add_ipv4_ipv4_string,
+ (void *) &cmd_firewall_add_ipv4_priority,
+ (void *) &cmd_firewall_add_ipv4_src_ip,
+ (void *) &cmd_firewall_add_ipv4_src_ip_mask,
+ (void *) &cmd_firewall_add_ipv4_dst_ip,
+ (void *) &cmd_firewall_add_ipv4_dst_ip_mask,
+ (void *) &cmd_firewall_add_ipv4_src_port_from,
+ (void *) &cmd_firewall_add_ipv4_src_port_to,
+ (void *) &cmd_firewall_add_ipv4_dst_port_from,
+ (void *) &cmd_firewall_add_ipv4_dst_port_to,
+ (void *) &cmd_firewall_add_ipv4_proto,
+ (void *) &cmd_firewall_add_ipv4_proto_mask,
+ (void *) &cmd_firewall_add_ipv4_port_id,
+ NULL,
+ },
+};
+
+/*
+ * p firewall del ipv4
+ */
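+/*
+ * Full command syntax (assembled from the token list below):
+ * p <pipeline_id> firewall del ipv4 <src_ip> <src_ip_mask> <dst_ip>
+ *   <dst_ip_mask> <src_port_from> <src_port_to> <dst_port_from>
+ *   <dst_port_to> <proto> <proto_mask>
+ */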
+
+struct cmd_firewall_del_ipv4_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t ipv4_string;
+ cmdline_ipaddr_t src_ip;
+ uint32_t src_ip_mask;
+ cmdline_ipaddr_t dst_ip;
+ uint32_t dst_ip_mask;
+ uint16_t src_port_from;
+ uint16_t src_port_to;
+ uint16_t dst_port_from;
+ uint16_t dst_port_to;
+ uint8_t proto;
+ uint8_t proto_mask;
+};
+
+static void
+cmd_firewall_del_ipv4_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_del_ipv4_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_firewall_key key;
+ int status;
+
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_bswap32(
+ (uint32_t) params->src_ip.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = params->src_ip_mask;
+ key.key.ipv4_5tuple.dst_ip = rte_bswap32(
+ (uint32_t) params->dst_ip.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = params->dst_ip_mask;
+ key.key.ipv4_5tuple.src_port_from = params->src_port_from;
+ key.key.ipv4_5tuple.src_port_to = params->src_port_to;
+ key.key.ipv4_5tuple.dst_port_from = params->dst_port_from;
+ key.key.ipv4_5tuple.dst_port_to = params->dst_port_to;
+ key.key.ipv4_5tuple.proto = params->proto;
+ key.key.ipv4_5tuple.proto_mask = params->proto_mask;
+
+ status = app_pipeline_firewall_delete_rule(app,
+ params->pipeline_id,
+ &key);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+cmdline_parse_token_string_t cmd_firewall_del_ipv4_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_firewall_del_ipv4_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
+ firewall_string, "firewall");
+
+cmdline_parse_token_string_t cmd_firewall_del_ipv4_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
+ del_string, "del");
+
+cmdline_parse_token_string_t cmd_firewall_del_ipv4_ipv4_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
+ ipv4_string, "ipv4");
+
+cmdline_parse_token_ipaddr_t cmd_firewall_del_ipv4_src_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_ip);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_ip_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_ip_mask,
+ UINT32);
+
+cmdline_parse_token_ipaddr_t cmd_firewall_del_ipv4_dst_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_firewall_del_ipv4_result, dst_ip);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_ip_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, dst_ip_mask,
+ UINT32);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_port_from =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
+ src_port_from, UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_port_to =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_port_to,
+ UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_port_from =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
+ dst_port_from, UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_port_to =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
+ dst_port_to, UINT16);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_proto =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
+ proto, UINT8);
+
+cmdline_parse_token_num_t cmd_firewall_del_ipv4_proto_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, proto_mask,
+ UINT8);
+
+cmdline_parse_inst_t cmd_firewall_del_ipv4 = {
+ .f = cmd_firewall_del_ipv4_parsed,
+ .data = NULL,
+ .help_str = "Firewall rule delete",
+ .tokens = {
+ (void *) &cmd_firewall_del_ipv4_p_string,
+ (void *) &cmd_firewall_del_ipv4_pipeline_id,
+ (void *) &cmd_firewall_del_ipv4_firewall_string,
+ (void *) &cmd_firewall_del_ipv4_del_string,
+ (void *) &cmd_firewall_del_ipv4_ipv4_string,
+ (void *) &cmd_firewall_del_ipv4_src_ip,
+ (void *) &cmd_firewall_del_ipv4_src_ip_mask,
+ (void *) &cmd_firewall_del_ipv4_dst_ip,
+ (void *) &cmd_firewall_del_ipv4_dst_ip_mask,
+ (void *) &cmd_firewall_del_ipv4_src_port_from,
+ (void *) &cmd_firewall_del_ipv4_src_port_to,
+ (void *) &cmd_firewall_del_ipv4_dst_port_from,
+ (void *) &cmd_firewall_del_ipv4_dst_port_to,
+ (void *) &cmd_firewall_del_ipv4_proto,
+ (void *) &cmd_firewall_del_ipv4_proto_mask,
+ NULL,
+ },
+};
+
+/*
+ * p firewall add bulk
+ */
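+/*
+ * Full command syntax (assembled from the token list below):
+ * p <pipeline_id> firewall add bulk <file_path>
+ * where <file_path> is parsed by app_pipeline_add_bulk_parse_file().
+ */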
+
+struct cmd_firewall_add_bulk_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t bulk_string;
+ cmdline_fixed_string_t file_path;
+};
+
+static void
+cmd_firewall_add_bulk_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_add_bulk_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+	/* Zero-init so the error path can safely free unassigned pointers */
+	struct app_pipeline_add_bulk_params add_bulk_params = { 0 };
+
+ status = app_pipeline_add_bulk_parse_file(params->file_path, &add_bulk_params);
+ if (status != 0) {
+ printf("Command failed\n");
+ goto end;
+ }
+
+ status = app_pipeline_firewall_add_bulk(app, params->pipeline_id, add_bulk_params.keys,
+ add_bulk_params.n_keys, add_bulk_params.priorities, add_bulk_params.port_ids);
+ if (status != 0) {
+ printf("Command failed\n");
+ goto end;
+ }
+
+end:
+ rte_free(add_bulk_params.keys);
+ rte_free(add_bulk_params.priorities);
+ rte_free(add_bulk_params.port_ids);
+}
+
+cmdline_parse_token_string_t cmd_firewall_add_bulk_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_firewall_add_bulk_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_bulk_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_firewall_add_bulk_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
+ firewall_string, "firewall");
+
+cmdline_parse_token_string_t cmd_firewall_add_bulk_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
+ add_string, "add");
+
+cmdline_parse_token_string_t cmd_firewall_add_bulk_bulk_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
+ bulk_string, "bulk");
+
+cmdline_parse_token_string_t cmd_firewall_add_bulk_file_path_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
+ file_path, NULL);
+
+cmdline_parse_inst_t cmd_firewall_add_bulk = {
+ .f = cmd_firewall_add_bulk_parsed,
+ .data = NULL,
+ .help_str = "Firewall rule add bulk",
+ .tokens = {
+ (void *) &cmd_firewall_add_bulk_p_string,
+ (void *) &cmd_firewall_add_bulk_pipeline_id,
+ (void *) &cmd_firewall_add_bulk_firewall_string,
+ (void *) &cmd_firewall_add_bulk_add_string,
+ (void *) &cmd_firewall_add_bulk_bulk_string,
+ (void *) &cmd_firewall_add_bulk_file_path_string,
+ NULL,
+ },
+};
+
+/*
+ * p firewall del bulk
+ */
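+/*
+ * Full command syntax (assembled from the token list below):
+ * p <pipeline_id> firewall del bulk <file_path>
+ * where <file_path> is parsed by app_pipeline_del_bulk_parse_file().
+ */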
+
+struct cmd_firewall_del_bulk_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t bulk_string;
+ cmdline_fixed_string_t file_path;
+};
+
+static void
+cmd_firewall_del_bulk_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_del_bulk_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+	/* Zero-init so the error path can safely free an unassigned pointer */
+	struct app_pipeline_del_bulk_params del_bulk_params = { 0 };
+
+ status = app_pipeline_del_bulk_parse_file(params->file_path, &del_bulk_params);
+ if (status != 0) {
+ printf("Command failed\n");
+ goto end;
+ }
+
+ status = app_pipeline_firewall_delete_bulk(app, params->pipeline_id,
+ del_bulk_params.keys, del_bulk_params.n_keys);
+ if (status != 0) {
+ printf("Command failed\n");
+ goto end;
+ }
+
+end:
+ rte_free(del_bulk_params.keys);
+}
+
+cmdline_parse_token_string_t cmd_firewall_del_bulk_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_firewall_del_bulk_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_bulk_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_firewall_del_bulk_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
+ firewall_string, "firewall");
+
+cmdline_parse_token_string_t cmd_firewall_del_bulk_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
+ del_string, "del");
+
+cmdline_parse_token_string_t cmd_firewall_del_bulk_bulk_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
+ bulk_string, "bulk");
+
+cmdline_parse_token_string_t cmd_firewall_del_bulk_file_path_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
+ file_path, NULL);
+
+cmdline_parse_inst_t cmd_firewall_del_bulk = {
+ .f = cmd_firewall_del_bulk_parsed,
+ .data = NULL,
+ .help_str = "Firewall rule del bulk",
+ .tokens = {
+ (void *) &cmd_firewall_del_bulk_p_string,
+ (void *) &cmd_firewall_del_bulk_pipeline_id,
+ (void *) &cmd_firewall_del_bulk_firewall_string,
+ (void *) &cmd_firewall_del_bulk_add_string,
+ (void *) &cmd_firewall_del_bulk_bulk_string,
+ (void *) &cmd_firewall_del_bulk_file_path_string,
+ NULL,
+ },
+};
+
+/*
+ * p firewall add default
+ */
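+/*
+ * Full command syntax (assembled from the token list below):
+ * p <pipeline_id> firewall add default <port_id>
+ */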
+struct cmd_firewall_add_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t default_string;
+ uint8_t port_id;
+};
+
+static void
+cmd_firewall_add_default_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_add_default_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_firewall_add_default_rule(app,
+ params->pipeline_id,
+ params->port_id);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+cmdline_parse_token_string_t cmd_firewall_add_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_firewall_add_default_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_default_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_firewall_add_default_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
+ firewall_string, "firewall");
+
+cmdline_parse_token_string_t cmd_firewall_add_default_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
+ add_string, "add");
+
+cmdline_parse_token_string_t cmd_firewall_add_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
+ default_string, "default");
+
+cmdline_parse_token_num_t cmd_firewall_add_default_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_default_result, port_id,
+ UINT8);
+
+cmdline_parse_inst_t cmd_firewall_add_default = {
+ .f = cmd_firewall_add_default_parsed,
+ .data = NULL,
+ .help_str = "Firewall default rule add",
+ .tokens = {
+ (void *) &cmd_firewall_add_default_p_string,
+ (void *) &cmd_firewall_add_default_pipeline_id,
+ (void *) &cmd_firewall_add_default_firewall_string,
+ (void *) &cmd_firewall_add_default_add_string,
+ (void *) &cmd_firewall_add_default_default_string,
+ (void *) &cmd_firewall_add_default_port_id,
+ NULL,
+ },
+};
+
+/*
+ * p firewall del default
+ */
+struct cmd_firewall_del_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t default_string;
+};
+
+static void
+cmd_firewall_del_default_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_del_default_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_firewall_delete_default_rule(app,
+ params->pipeline_id);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+cmdline_parse_token_string_t cmd_firewall_del_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_firewall_del_default_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_default_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_firewall_del_default_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
+ firewall_string, "firewall");
+
+cmdline_parse_token_string_t cmd_firewall_del_default_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
+ del_string, "del");
+
+cmdline_parse_token_string_t cmd_firewall_del_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
+ default_string, "default");
+
+cmdline_parse_inst_t cmd_firewall_del_default = {
+ .f = cmd_firewall_del_default_parsed,
+ .data = NULL,
+ .help_str = "Firewall default rule delete",
+ .tokens = {
+ (void *) &cmd_firewall_del_default_p_string,
+ (void *) &cmd_firewall_del_default_pipeline_id,
+ (void *) &cmd_firewall_del_default_firewall_string,
+ (void *) &cmd_firewall_del_default_del_string,
+ (void *) &cmd_firewall_del_default_default_string,
+ NULL,
+ },
+};
+
+/*
+ * p firewall ls
+ */
+
+struct cmd_firewall_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_firewall_ls_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_ls_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_firewall_ls(app, params->pipeline_id);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+cmdline_parse_token_string_t cmd_firewall_ls_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_firewall_ls_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_ls_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_firewall_ls_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result,
+ firewall_string, "firewall");
+
+cmdline_parse_token_string_t cmd_firewall_ls_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result, ls_string,
+ "ls");
+
+cmdline_parse_inst_t cmd_firewall_ls = {
+ .f = cmd_firewall_ls_parsed,
+ .data = NULL,
+ .help_str = "Firewall rule list",
+ .tokens = {
+ (void *) &cmd_firewall_ls_p_string,
+ (void *) &cmd_firewall_ls_pipeline_id,
+ (void *) &cmd_firewall_ls_firewall_string,
+ (void *) &cmd_firewall_ls_ls_string,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_firewall_add_ipv4,
+ (cmdline_parse_inst_t *) &cmd_firewall_del_ipv4,
+ (cmdline_parse_inst_t *) &cmd_firewall_add_bulk,
+ (cmdline_parse_inst_t *) &cmd_firewall_del_bulk,
+ (cmdline_parse_inst_t *) &cmd_firewall_add_default,
+ (cmdline_parse_inst_t *) &cmd_firewall_del_default,
+ (cmdline_parse_inst_t *) &cmd_firewall_ls,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_firewall_fe_ops = {
+ .f_init = app_pipeline_firewall_init,
+ .f_free = app_pipeline_firewall_free,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_firewall = {
+ .name = "FIREWALL",
+ .be_ops = &pipeline_firewall_be_ops,
+ .fe_ops = &pipeline_firewall_fe_ops,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.h b/examples/ip_pipeline/pipeline/pipeline_firewall.h
new file mode 100644
index 00000000..ccc4e64b
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall.h
@@ -0,0 +1,77 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FIREWALL_H__
+#define __INCLUDE_PIPELINE_FIREWALL_H__
+
+#include "pipeline.h"
+#include "pipeline_firewall_be.h"
+
+int
+app_pipeline_firewall_add_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key,
+ uint32_t priority,
+ uint32_t port_id);
+
+int
+app_pipeline_firewall_delete_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key);
+
+int
+app_pipeline_firewall_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys,
+ uint32_t *priorities,
+ uint32_t *port_ids);
+
+int
+app_pipeline_firewall_delete_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys);
+
+int
+app_pipeline_firewall_add_default_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_firewall_delete_default_rule(struct app_params *app,
+ uint32_t pipeline_id);
+
+extern struct pipeline_type pipeline_firewall;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
new file mode 100644
index 00000000..e7a8a4c5
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
@@ -0,0 +1,907 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_byteorder.h>
+#include <rte_table_acl.h>
+
+#include "pipeline_firewall_be.h"
+#include "parser.h"
+
+struct pipeline_firewall {
+ struct pipeline p;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_FIREWALL_MSG_REQS];
+
+ uint32_t n_rules;
+ uint32_t n_rule_fields;
+ struct rte_acl_field_def *field_format;
+ uint32_t field_format_size;
+} __rte_cache_aligned;
+
+static void *
+pipeline_firewall_msg_req_custom_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_firewall_msg_req_custom_handler,
+};
+
+static void *
+pipeline_firewall_msg_req_add_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_del_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_add_default_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_del_default_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_FIREWALL_MSG_REQ_ADD] =
+ pipeline_firewall_msg_req_add_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_DEL] =
+ pipeline_firewall_msg_req_del_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_ADD_BULK] =
+ pipeline_firewall_msg_req_add_bulk_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_DEL_BULK] =
+ pipeline_firewall_msg_req_del_bulk_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT] =
+ pipeline_firewall_msg_req_add_default_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT] =
+ pipeline_firewall_msg_req_del_default_handler,
+};
+
+/*
+ * Firewall table
+ */
+struct firewall_table_entry {
+ struct rte_pipeline_table_entry head;
+};
+
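+/*
+ * ACL field definitions for the IPv4 5-tuple: protocol, source address,
+ * destination address, source port and destination port. The three
+ * variants below differ only in the offset of the IPv4 header within the
+ * packet: plain Ethernet, Ethernet + VLAN (4 extra bytes) and
+ * Ethernet + QinQ (8 extra bytes).
+ */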
+static struct rte_acl_field_def field_format_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+#define SIZEOF_VLAN_HDR 4
+
+static struct rte_acl_field_def field_format_vlan_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+#define SIZEOF_QINQ_HEADER 8
+
+static struct rte_acl_field_def field_format_qinq_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
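+/*
+ * Parse the pipeline arguments from the application configuration file.
+ * Recognized arguments (see the defaults below): "n_rules", the maximum
+ * number of ACL rules (default 4 * 1024), and "pkt_type", one of "ipv4",
+ * "vlan_ipv4" or "qinq_ipv4", selecting the ACL field format.
+ */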
+static int
+pipeline_firewall_parse_args(struct pipeline_firewall *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_rules_present = 0;
+ uint32_t pkt_type_present = 0;
+ uint32_t i;
+
+ /* defaults */
+ p->n_rules = 4 * 1024;
+ p->n_rule_fields = RTE_DIM(field_format_ipv4);
+ p->field_format = field_format_ipv4;
+ p->field_format_size = sizeof(field_format_ipv4);
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ if (strcmp(arg_name, "n_rules") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_rules_present == 0, params->name,
+ arg_name);
+ n_rules_present = 1;
+
+ status = parser_read_uint32(&p->n_rules,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+ continue;
+ }
+
+ if (strcmp(arg_name, "pkt_type") == 0) {
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ pkt_type_present == 0, params->name,
+ arg_name);
+ pkt_type_present = 1;
+
+ /* ipv4 */
+ if (strcmp(arg_value, "ipv4") == 0) {
+ p->n_rule_fields = RTE_DIM(field_format_ipv4);
+ p->field_format = field_format_ipv4;
+ p->field_format_size =
+ sizeof(field_format_ipv4);
+ continue;
+ }
+
+ /* vlan_ipv4 */
+ if (strcmp(arg_value, "vlan_ipv4") == 0) {
+ p->n_rule_fields =
+ RTE_DIM(field_format_vlan_ipv4);
+ p->field_format = field_format_vlan_ipv4;
+ p->field_format_size =
+ sizeof(field_format_vlan_ipv4);
+ continue;
+ }
+
+ /* qinq_ipv4 */
+ if (strcmp(arg_value, "qinq_ipv4") == 0) {
+ p->n_rule_fields =
+ RTE_DIM(field_format_qinq_ipv4);
+ p->field_format = field_format_qinq_ipv4;
+ p->field_format_size =
+ sizeof(field_format_qinq_ipv4);
+ continue;
+ }
+
+ /* other */
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* other */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ return 0;
+}
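+
+/*
+ * Illustrative pipeline configuration entries consumed by the parser above
+ * (section name, pipeline type string and the remaining keys are
+ * application-specific; values shown are examples only):
+ *
+ *   [PIPELINE1]
+ *   type = FIREWALL
+ *   n_rules = 4096
+ *   pkt_type = qinq_ipv4
+ */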
+
+static void *
+pipeline_firewall_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_firewall *p_fw;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_firewall));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_fw = (struct pipeline_firewall *) p;
+ if (p == NULL)
+ return NULL;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Firewall");
+
+	/* Parse arguments (free the pipeline memory on failure) */
+	if (pipeline_firewall_parse_args(p_fw, params)) {
+		rte_free(p);
+		return NULL;
+	}
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+ struct rte_table_acl_params table_acl_params = {
+ .name = params->name,
+ .n_rules = p_fw->n_rules,
+ .n_rule_fields = p_fw->n_rule_fields,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_acl_ops,
+ .arg_create = &table_acl_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size =
+ sizeof(struct firewall_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ memcpy(table_acl_params.field_format,
+ p_fw->field_format,
+ p_fw->field_format_size);
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_fw->custom_handlers,
+ custom_handlers,
+ sizeof(p_fw->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_firewall_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_firewall_track(void *pipeline,
+	uint32_t port_in,
+ uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_in == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int
+pipeline_firewall_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+void *
+pipeline_firewall_msg_req_custom_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_firewall *p_fw = (struct pipeline_firewall *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_FIREWALL_MSG_REQS) ?
+ p_fw->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
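+/*
+ * The add/delete handlers below convert an IPv4 5-tuple key into the ACL
+ * rule format: field 0 is the protocol (value plus mask), fields 1-2 are the
+ * source/destination addresses (value plus mask), and fields 3-4 are the
+ * source/destination port ranges (value = lower bound, mask_range = upper
+ * bound).
+ */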
+void *
+pipeline_firewall_msg_req_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_add_msg_req *req = msg;
+ struct pipeline_firewall_add_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_add_params params;
+ struct firewall_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+ };
+
+ memset(&params, 0, sizeof(params));
+
+ switch (req->key.type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params.priority = req->priority;
+ params.field_value[0].value.u8 =
+ req->key.key.ipv4_5tuple.proto;
+ params.field_value[0].mask_range.u8 =
+ req->key.key.ipv4_5tuple.proto_mask;
+ params.field_value[1].value.u32 =
+ req->key.key.ipv4_5tuple.src_ip;
+ params.field_value[1].mask_range.u32 =
+ req->key.key.ipv4_5tuple.src_ip_mask;
+ params.field_value[2].value.u32 =
+ req->key.key.ipv4_5tuple.dst_ip;
+ params.field_value[2].mask_range.u32 =
+ req->key.key.ipv4_5tuple.dst_ip_mask;
+ params.field_value[3].value.u16 =
+ req->key.key.ipv4_5tuple.src_port_from;
+ params.field_value[3].mask_range.u16 =
+ req->key.key.ipv4_5tuple.src_port_to;
+ params.field_value[4].value.u16 =
+ req->key.key.ipv4_5tuple.dst_port_from;
+ params.field_value[4].mask_range.u16 =
+ req->key.key.ipv4_5tuple.dst_port_to;
+ break;
+
+ default:
+ rsp->status = -1; /* Error */
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &params,
+ (struct rte_pipeline_table_entry *) &entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_firewall_msg_req_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_del_msg_req *req = msg;
+ struct pipeline_firewall_del_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_delete_params params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (req->key.type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params.field_value[0].value.u8 =
+ req->key.key.ipv4_5tuple.proto;
+ params.field_value[0].mask_range.u8 =
+ req->key.key.ipv4_5tuple.proto_mask;
+ params.field_value[1].value.u32 =
+ req->key.key.ipv4_5tuple.src_ip;
+ params.field_value[1].mask_range.u32 =
+ req->key.key.ipv4_5tuple.src_ip_mask;
+ params.field_value[2].value.u32 =
+ req->key.key.ipv4_5tuple.dst_ip;
+ params.field_value[2].mask_range.u32 =
+ req->key.key.ipv4_5tuple.dst_ip_mask;
+ params.field_value[3].value.u16 =
+ req->key.key.ipv4_5tuple.src_port_from;
+ params.field_value[3].mask_range.u16 =
+ req->key.key.ipv4_5tuple.src_port_to;
+ params.field_value[4].value.u16 =
+ req->key.key.ipv4_5tuple.dst_port_from;
+ params.field_value[4].mask_range.u16 =
+ req->key.key.ipv4_5tuple.dst_port_to;
+ break;
+
+ default:
+ rsp->status = -1; /* Error */
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[0],
+ &params,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+static void *
+pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_add_bulk_msg_req *req = msg;
+ struct pipeline_firewall_add_bulk_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_add_params *params[req->n_keys];
+ struct firewall_table_entry *entries[req->n_keys];
+
+ uint32_t i, n_keys;
+
+ n_keys = req->n_keys;
+
+ for (i = 0; i < n_keys; i++) {
+ entries[i] = rte_malloc(NULL,
+ sizeof(struct firewall_table_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (entries[i] == NULL) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ params[i] = rte_malloc(NULL,
+ sizeof(struct rte_table_acl_rule_add_params),
+ RTE_CACHE_LINE_SIZE);
+ if (params[i] == NULL) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ entries[i]->head.action = RTE_PIPELINE_ACTION_PORT;
+ entries[i]->head.port_id = p->port_out_id[req->port_ids[i]];
+
+ switch (req->keys[i].type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params[i]->priority = req->priorities[i];
+ params[i]->field_value[0].value.u8 =
+ req->keys[i].key.ipv4_5tuple.proto;
+ params[i]->field_value[0].mask_range.u8 =
+ req->keys[i].key.ipv4_5tuple.proto_mask;
+ params[i]->field_value[1].value.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip;
+ params[i]->field_value[1].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip_mask;
+ params[i]->field_value[2].value.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip;
+ params[i]->field_value[2].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip_mask;
+ params[i]->field_value[3].value.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_from;
+ params[i]->field_value[3].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_to;
+ params[i]->field_value[4].value.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_from;
+ params[i]->field_value[4].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_to;
+ break;
+
+		default:
+			rsp->status = -1; /* Error */
+
+			/* Free only the elements allocated so far */
+			n_keys = i + 1;
+			for (i = 0; i < n_keys; i++) {
+				rte_free(entries[i]);
+				rte_free(params[i]);
+			}
+
+			return rsp;
+ }
+ }
+
+ rsp->status = rte_pipeline_table_entry_add_bulk(p->p, p->table_id[0],
+ (void *)params, (struct rte_pipeline_table_entry **)entries,
+ n_keys, req->keys_found,
+ (struct rte_pipeline_table_entry **)req->entries_ptr);
+
+ for (i = 0; i < n_keys; i++) {
+ rte_free(entries[i]);
+ rte_free(params[i]);
+ }
+
+ return rsp;
+}
+
+static void *
+pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_del_bulk_msg_req *req = msg;
+ struct pipeline_firewall_del_bulk_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_delete_params *params[req->n_keys];
+
+ uint32_t i, n_keys;
+
+ n_keys = req->n_keys;
+
+ for (i = 0; i < n_keys; i++) {
+ params[i] = rte_malloc(NULL,
+ sizeof(struct rte_table_acl_rule_delete_params),
+ RTE_CACHE_LINE_SIZE);
+ if (params[i] == NULL) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ switch (req->keys[i].type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params[i]->field_value[0].value.u8 =
+ req->keys[i].key.ipv4_5tuple.proto;
+ params[i]->field_value[0].mask_range.u8 =
+ req->keys[i].key.ipv4_5tuple.proto_mask;
+ params[i]->field_value[1].value.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip;
+ params[i]->field_value[1].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip_mask;
+ params[i]->field_value[2].value.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip;
+ params[i]->field_value[2].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip_mask;
+ params[i]->field_value[3].value.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_from;
+ params[i]->field_value[3].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_to;
+ params[i]->field_value[4].value.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_from;
+ params[i]->field_value[4].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_to;
+ break;
+
+		default:
+			rsp->status = -1; /* Error */
+
+			/* Free only the elements allocated so far */
+			n_keys = i + 1;
+			for (i = 0; i < n_keys; i++)
+				rte_free(params[i]);
+
+			return rsp;
+ }
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete_bulk(p->p, p->table_id[0],
+ (void **)&params, n_keys, req->keys_found, NULL);
+
+ for (i = 0; i < n_keys; i++)
+ rte_free(params[i]);
+
+ return rsp;
+}
+
+void *
+pipeline_firewall_msg_req_add_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_add_default_msg_req *req = msg;
+ struct pipeline_firewall_add_default_msg_rsp *rsp = msg;
+
+ struct firewall_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[0],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_firewall_msg_req_del_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_del_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[0],
+ NULL);
+
+ return rsp;
+}
+
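+/*
+ * Back-end operations: f_run is left NULL, so the application falls back to
+ * its default pipeline run routine, while f_timer periodically services the
+ * message queues and flushes the pipeline.
+ */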
+struct pipeline_be_ops pipeline_firewall_be_ops = {
+ .f_init = pipeline_firewall_init,
+ .f_free = pipeline_firewall_free,
+ .f_run = NULL,
+ .f_timer = pipeline_firewall_timer,
+ .f_track = pipeline_firewall_track,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.h b/examples/ip_pipeline/pipeline/pipeline_firewall_be.h
new file mode 100644
index 00000000..f5b0522f
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall_be.h
@@ -0,0 +1,176 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FIREWALL_BE_H__
+#define __INCLUDE_PIPELINE_FIREWALL_BE_H__
+
+#include "pipeline_common_be.h"
+
+enum pipeline_firewall_key_type {
+ PIPELINE_FIREWALL_IPV4_5TUPLE,
+};
+
+struct pipeline_firewall_key_ipv4_5tuple {
+ uint32_t src_ip;
+ uint32_t src_ip_mask;
+ uint32_t dst_ip;
+ uint32_t dst_ip_mask;
+ uint16_t src_port_from;
+ uint16_t src_port_to;
+ uint16_t dst_port_from;
+ uint16_t dst_port_to;
+ uint8_t proto;
+ uint8_t proto_mask;
+};
+
+struct pipeline_firewall_key {
+ enum pipeline_firewall_key_type type;
+ union {
+ struct pipeline_firewall_key_ipv4_5tuple ipv4_5tuple;
+ } key;
+};
+
+enum pipeline_firewall_msg_req_type {
+ PIPELINE_FIREWALL_MSG_REQ_ADD = 0,
+ PIPELINE_FIREWALL_MSG_REQ_DEL,
+ PIPELINE_FIREWALL_MSG_REQ_ADD_BULK,
+ PIPELINE_FIREWALL_MSG_REQ_DEL_BULK,
+ PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT,
+ PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT,
+ PIPELINE_FIREWALL_MSG_REQS
+};
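+
+/*
+ * These subtypes index the back-end custom handler table (see
+ * pipeline_firewall_msg_req_custom_handler()); PIPELINE_FIREWALL_MSG_REQS is
+ * the table size and must stay last.
+ */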
+
+/*
+ * MSG ADD
+ */
+struct pipeline_firewall_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_firewall_key key;
+
+ /* data */
+ int32_t priority;
+ uint32_t port_id;
+};
+
+struct pipeline_firewall_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG DEL
+ */
+struct pipeline_firewall_del_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_firewall_key key;
+};
+
+struct pipeline_firewall_del_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ADD BULK
+ */
+struct pipeline_firewall_add_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ struct pipeline_firewall_key *keys;
+ uint32_t n_keys;
+
+ uint32_t *priorities;
+ uint32_t *port_ids;
+ int *keys_found;
+ void **entries_ptr;
+};
+struct pipeline_firewall_add_bulk_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG DEL BULK
+ */
+struct pipeline_firewall_del_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_firewall_key *keys;
+ uint32_t n_keys;
+ int *keys_found;
+};
+
+struct pipeline_firewall_del_bulk_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG ADD DEFAULT
+ */
+struct pipeline_firewall_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* data */
+ uint32_t port_id;
+};
+
+struct pipeline_firewall_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG DEL DEFAULT
+ */
+struct pipeline_firewall_del_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+};
+
+struct pipeline_firewall_del_default_msg_rsp {
+ int status;
+};
+
+extern struct pipeline_be_ops pipeline_firewall_be_ops;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
new file mode 100644
index 00000000..4012121f
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
@@ -0,0 +1,1814 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_flow_actions.h"
+#include "hash_func.h"
+
+/*
+ * Flow actions pipeline
+ */
+#ifndef N_FLOWS_BULK
+#define N_FLOWS_BULK 4096
+#endif
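+
+/*
+ * N_FLOWS_BULK caps how many flows the bulk CLI handlers below configure per
+ * request sent to the back-end; it can be overridden at build time.
+ */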
+
+struct app_pipeline_fa_flow {
+ struct pipeline_fa_flow_params params;
+ void *entry_ptr;
+};
+
+struct app_pipeline_fa_dscp {
+ uint32_t traffic_class;
+ enum rte_meter_color color;
+};
+
+struct app_pipeline_fa {
+ /* Parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ struct pipeline_fa_params params;
+
+ /* Flows */
+ struct app_pipeline_fa_dscp dscp[PIPELINE_FA_N_DSCP];
+ struct app_pipeline_fa_flow *flows;
+} __rte_cache_aligned;
+
+static void*
+app_pipeline_fa_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct app_pipeline_fa *p;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fa));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+ if (pipeline_fa_parse_args(&p->params, params)) {
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(
+ p->params.n_flows * sizeof(struct app_pipeline_fa_flow));
+ p->flows = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p->flows == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Initialization of flow table */
+ for (i = 0; i < p->params.n_flows; i++)
+ pipeline_fa_flow_params_set_default(&p->flows[i].params);
+
+ /* Initialization of DSCP table */
+ for (i = 0; i < RTE_DIM(p->dscp); i++) {
+ p->dscp[i].traffic_class = 0;
+ p->dscp[i].color = e_RTE_METER_GREEN;
+ }
+
+ return (void *) p;
+}
+
+static int
+app_pipeline_fa_free(void *pipeline)
+{
+ struct app_pipeline_fa *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_free(p->flows);
+ rte_free(p);
+
+ return 0;
+}
+
+static int
+flow_params_check(struct app_pipeline_fa *p,
+ __rte_unused uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params)
+{
+ uint32_t mask, i;
+
+	/* Meter: no additional checks are performed on the meter parameters */
+
+ /* Policer */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ struct pipeline_fa_policer_params *p = &params->p[i];
+ uint32_t j;
+
+ if ((mask & policer_update_mask) == 0)
+ continue;
+
+ for (j = 0; j < e_RTE_METER_COLORS; j++) {
+ struct pipeline_fa_policer_action *action =
+ &p->action[j];
+
+ if ((action->drop == 0) &&
+ (action->color >= e_RTE_METER_COLORS))
+ return -1;
+ }
+ }
+
+ /* Port */
+ if (port_update && (params->port_id >= p->n_ports_out))
+ return -1;
+
+ return 0;
+}
+
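+/*
+ * Front-end flow configuration: validate the request, send a
+ * PIPELINE_FA_MSG_REQ_FLOW_CONFIG message to the back-end and, on success,
+ * commit the new meter/policer/port parameters to the local flow copy.
+ */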
+int
+app_pipeline_fa_flow_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params)
+{
+ struct app_pipeline_fa *p;
+ struct app_pipeline_fa_flow *flow;
+
+ struct pipeline_fa_flow_config_msg_req *req;
+ struct pipeline_fa_flow_config_msg_rsp *rsp;
+
+ uint32_t i, mask;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ ((meter_update_mask == 0) &&
+ (policer_update_mask == 0) &&
+ (port_update == 0)) ||
+ (meter_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (policer_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (params == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ if (flow_params_check(p,
+ meter_update_mask,
+ policer_update_mask,
+ port_update,
+ params) != 0)
+ return -1;
+
+ flow_id %= p->params.n_flows;
+ flow = &p->flows[flow_id];
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_FLOW_CONFIG;
+ req->entry_ptr = flow->entry_ptr;
+ req->flow_id = flow_id;
+ req->meter_update_mask = meter_update_mask;
+ req->policer_update_mask = policer_update_mask;
+ req->port_update = port_update;
+ memcpy(&req->params, params, sizeof(*params));
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit flow */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ if ((mask & meter_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.m[i], &params->m[i], sizeof(params->m[i]));
+ }
+
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ if ((mask & policer_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.p[i], &params->p[i], sizeof(params->p[i]));
+ }
+
+ if (port_update)
+ flow->params.port_id = params->port_id;
+
+ flow->entry_ptr = rsp->entry_ptr;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fa_flow_config_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t *flow_id,
+ uint32_t n_flows,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params)
+{
+ struct app_pipeline_fa *p;
+ struct pipeline_fa_flow_config_bulk_msg_req *req;
+ struct pipeline_fa_flow_config_bulk_msg_rsp *rsp;
+ void **req_entry_ptr;
+ uint32_t *req_flow_id;
+ uint32_t i;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (flow_id == NULL) ||
+ (n_flows == 0) ||
+ ((meter_update_mask == 0) &&
+ (policer_update_mask == 0) &&
+ (port_update == 0)) ||
+ (meter_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (policer_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (params == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < n_flows; i++) {
+ struct pipeline_fa_flow_params *flow_params = &params[i];
+
+ if (flow_params_check(p,
+ meter_update_mask,
+ policer_update_mask,
+ port_update,
+ flow_params) != 0)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req_entry_ptr = (void **) rte_malloc(NULL,
+ n_flows * sizeof(void *),
+ RTE_CACHE_LINE_SIZE);
+ if (req_entry_ptr == NULL)
+ return -1;
+
+ req_flow_id = (uint32_t *) rte_malloc(NULL,
+ n_flows * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (req_flow_id == NULL) {
+ rte_free(req_entry_ptr);
+ return -1;
+ }
+
+ for (i = 0; i < n_flows; i++) {
+ uint32_t fid = flow_id[i] % p->params.n_flows;
+ struct app_pipeline_fa_flow *flow = &p->flows[fid];
+
+ req_flow_id[i] = fid;
+ req_entry_ptr[i] = flow->entry_ptr;
+ }
+
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ rte_free(req_flow_id);
+ rte_free(req_entry_ptr);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK;
+ req->entry_ptr = req_entry_ptr;
+ req->flow_id = req_flow_id;
+ req->n_flows = n_flows;
+ req->meter_update_mask = meter_update_mask;
+ req->policer_update_mask = policer_update_mask;
+ req->port_update = port_update;
+ req->params = params;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ rte_free(req_flow_id);
+ rte_free(req_entry_ptr);
+ return -1;
+ }
+
+ /* Read response */
+
+ /* Commit flows */
+ for (i = 0; i < rsp->n_flows; i++) {
+ uint32_t fid = flow_id[i] % p->params.n_flows;
+ struct app_pipeline_fa_flow *flow = &p->flows[fid];
+ struct pipeline_fa_flow_params *flow_params = &params[i];
+ void *entry_ptr = req_entry_ptr[i];
+ uint32_t j, mask;
+
+ for (j = 0, mask = 1; j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ if ((mask & meter_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.m[j],
+ &flow_params->m[j],
+ sizeof(flow_params->m[j]));
+ }
+
+ for (j = 0, mask = 1; j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ if ((mask & policer_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.p[j],
+ &flow_params->p[j],
+ sizeof(flow_params->p[j]));
+ }
+
+ if (port_update)
+ flow->params.port_id = flow_params->port_id;
+
+ flow->entry_ptr = entry_ptr;
+ }
+
+	/* Free response (cache rsp->n_flows first, rsp is invalid afterwards) */
+	i = rsp->n_flows;
+	app_msg_free(app, rsp);
+	rte_free(req_flow_id);
+	rte_free(req_entry_ptr);
+
+	return (i == n_flows) ? 0 : -1;
+}
+
+int
+app_pipeline_fa_dscp_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t dscp,
+ uint32_t traffic_class,
+ enum rte_meter_color color)
+{
+ struct app_pipeline_fa *p;
+
+ struct pipeline_fa_dscp_config_msg_req *req;
+ struct pipeline_fa_dscp_config_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (dscp >= PIPELINE_FA_N_DSCP) ||
+ (traffic_class >= PIPELINE_FA_N_TC_MAX) ||
+ (color >= e_RTE_METER_COLORS))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ if (p->params.dscp_enabled == 0)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_DSCP_CONFIG;
+ req->dscp = dscp;
+ req->traffic_class = traffic_class;
+ req->color = color;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit DSCP */
+ p->dscp[dscp].traffic_class = traffic_class;
+ p->dscp[dscp].color = color;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t policer_id,
+ int clear,
+ struct pipeline_fa_policer_stats *stats)
+{
+ struct app_pipeline_fa *p;
+ struct app_pipeline_fa_flow *flow;
+
+ struct pipeline_fa_policer_stats_msg_req *req;
+ struct pipeline_fa_policer_stats_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) || (stats == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ flow_id %= p->params.n_flows;
+ flow = &p->flows[flow_id];
+
+ if ((policer_id >= p->params.n_meters_per_flow) ||
+ (flow->entry_ptr == NULL))
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_POLICER_STATS_READ;
+ req->entry_ptr = flow->entry_ptr;
+ req->policer_id = policer_id;
+ req->clear = clear;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ memcpy(stats, &rsp->stats, sizeof(*stats));
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+static const char *
+color_to_string(enum rte_meter_color color)
+{
+ switch (color) {
+ case e_RTE_METER_GREEN: return "G";
+ case e_RTE_METER_YELLOW: return "Y";
+ case e_RTE_METER_RED: return "R";
+ default: return "?";
+ }
+}
+
+static int
+string_to_color(char *s, enum rte_meter_color *c)
+{
+ if (strcmp(s, "G") == 0) {
+ *c = e_RTE_METER_GREEN;
+ return 0;
+ }
+
+ if (strcmp(s, "Y") == 0) {
+ *c = e_RTE_METER_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(s, "R") == 0) {
+ *c = e_RTE_METER_RED;
+ return 0;
+ }
+
+ return -1;
+}
+
+static const char *
+policer_action_to_string(struct pipeline_fa_policer_action *a)
+{
+ if (a->drop)
+ return "D";
+
+ return color_to_string(a->color);
+}
+
+static int
+string_to_policer_action(char *s, struct pipeline_fa_policer_action *a)
+{
+ if (strcmp(s, "G") == 0) {
+ a->drop = 0;
+ a->color = e_RTE_METER_GREEN;
+ return 0;
+ }
+
+ if (strcmp(s, "Y") == 0) {
+ a->drop = 0;
+ a->color = e_RTE_METER_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(s, "R") == 0) {
+ a->drop = 0;
+ a->color = e_RTE_METER_RED;
+ return 0;
+ }
+
+ if (strcmp(s, "D") == 0) {
+ a->drop = 1;
+ a->color = e_RTE_METER_GREEN;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void
+print_flow(struct app_pipeline_fa *p,
+ uint32_t flow_id,
+ struct app_pipeline_fa_flow *flow)
+{
+ uint32_t i;
+
+ printf("Flow ID = %" PRIu32 "\n", flow_id);
+
+ for (i = 0; i < p->params.n_meters_per_flow; i++) {
+ struct rte_meter_trtcm_params *meter = &flow->params.m[i];
+ struct pipeline_fa_policer_params *policer = &flow->params.p[i];
+
+ printf("\ttrTCM [CIR = %" PRIu64
+ ", CBS = %" PRIu64 ", PIR = %" PRIu64
+ ", PBS = %" PRIu64 "] Policer [G : %s, Y : %s, R : %s]\n",
+ meter->cir,
+ meter->cbs,
+ meter->pir,
+ meter->pbs,
+ policer_action_to_string(&policer->action[e_RTE_METER_GREEN]),
+ policer_action_to_string(&policer->action[e_RTE_METER_YELLOW]),
+ policer_action_to_string(&policer->action[e_RTE_METER_RED]));
+ }
+
+ printf("\tPort %u (entry_ptr = %p)\n",
+ flow->params.port_id,
+ flow->entry_ptr);
+}
+
+
+static int
+app_pipeline_fa_flow_ls(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fa *p;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < p->params.n_flows; i++) {
+ struct app_pipeline_fa_flow *flow = &p->flows[i];
+
+ print_flow(p, i, flow);
+ }
+
+ return 0;
+}
+
+static int
+app_pipeline_fa_dscp_ls(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fa *p;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ if (p->params.dscp_enabled == 0)
+ return -1;
+
+ for (i = 0; i < RTE_DIM(p->dscp); i++) {
+ struct app_pipeline_fa_dscp *dscp = &p->dscp[i];
+
+ printf("DSCP = %2" PRIu32 ": Traffic class = %" PRIu32
+ ", Color = %s\n",
+ i,
+ dscp->traffic_class,
+ color_to_string(dscp->color));
+ }
+
+ return 0;
+}
+
+/*
+ * Flow meter configuration (single flow)
+ *
+ * p <pipeline ID> flow <flow ID> meter <meter ID> trtcm <trtcm params>
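+ *
+ * Example (illustrative values, in bytes and bytes/second):
+ *   p 1 flow 0 meter 0 trtcm 1250000 1250000 100000 100000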
+ */
+
+struct cmd_fa_meter_config_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ uint32_t flow_id;
+ cmdline_fixed_string_t meter_string;
+ uint32_t meter_id;
+ cmdline_fixed_string_t trtcm_string;
+ uint64_t cir;
+ uint64_t pir;
+ uint64_t cbs;
+ uint64_t pbs;
+};
+
+static void
+cmd_fa_meter_config_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_meter_config_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fa_flow_params flow_params;
+ int status;
+
+ if (params->meter_id >= PIPELINE_FA_N_TC_MAX) {
+ printf("Command failed\n");
+ return;
+ }
+
+ flow_params.m[params->meter_id].cir = params->cir;
+ flow_params.m[params->meter_id].pir = params->pir;
+ flow_params.m[params->meter_id].cbs = params->cbs;
+ flow_params.m[params->meter_id].pbs = params->pbs;
+
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ params->flow_id,
+ 1 << params->meter_id,
+ 0,
+ 0,
+ &flow_params);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fa_meter_config_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_meter_config_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
+ flow_string, "flow");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
+ flow_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_meter_config_meter_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
+ meter_string, "meter");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_meter_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
+ meter_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_meter_config_trtcm_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
+ trtcm_string, "trtcm");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_cir =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, cir, UINT64);
+
+cmdline_parse_token_num_t cmd_fa_meter_config_pir =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, pir, UINT64);
+
+cmdline_parse_token_num_t cmd_fa_meter_config_cbs =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, cbs, UINT64);
+
+cmdline_parse_token_num_t cmd_fa_meter_config_pbs =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, pbs, UINT64);
+
+cmdline_parse_inst_t cmd_fa_meter_config = {
+ .f = cmd_fa_meter_config_parsed,
+ .data = NULL,
+	.help_str = "Flow meter configuration (single flow)",
+ .tokens = {
+ (void *) &cmd_fa_meter_config_p_string,
+ (void *) &cmd_fa_meter_config_pipeline_id,
+ (void *) &cmd_fa_meter_config_flow_string,
+ (void *) &cmd_fa_meter_config_flow_id,
+ (void *) &cmd_fa_meter_config_meter_string,
+ (void *) &cmd_fa_meter_config_meter_id,
+ (void *) &cmd_fa_meter_config_trtcm_string,
+ (void *) &cmd_fa_meter_config_cir,
+ (void *) &cmd_fa_meter_config_pir,
+ (void *) &cmd_fa_meter_config_cbs,
+ (void *) &cmd_fa_meter_config_pbs,
+ NULL,
+ },
+};
+
+/*
+ * Flow meter configuration (multiple flows)
+ *
+ * p <pipeline ID> flows <n_flows> meter <meter ID> trtcm <trtcm params>
+ */
+
+struct cmd_fa_meter_config_bulk_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flows_string;
+ uint32_t n_flows;
+ cmdline_fixed_string_t meter_string;
+ uint32_t meter_id;
+ cmdline_fixed_string_t trtcm_string;
+ uint64_t cir;
+ uint64_t pir;
+ uint64_t cbs;
+ uint64_t pbs;
+};
+
+static void
+cmd_fa_meter_config_bulk_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_meter_config_bulk_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fa_flow_params flow_template, *flow_params;
+ uint32_t *flow_id;
+ uint32_t i;
+
+ if ((params->n_flows == 0) ||
+ (params->meter_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf("Invalid arguments\n");
+ return;
+ }
+
+ flow_id = (uint32_t *) rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_id == NULL) {
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_params == NULL) {
+ rte_free(flow_id);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ memset(&flow_template, 0, sizeof(flow_template));
+ flow_template.m[params->meter_id].cir = params->cir;
+ flow_template.m[params->meter_id].pir = params->pir;
+ flow_template.m[params->meter_id].cbs = params->cbs;
+ flow_template.m[params->meter_id].pbs = params->pbs;
+
+ for (i = 0; i < params->n_flows; i++) {
+ uint32_t pos = i % N_FLOWS_BULK;
+
+ flow_id[pos] = i;
+ memcpy(&flow_params[pos],
+ &flow_template,
+ sizeof(flow_template));
+
+ if ((pos == N_FLOWS_BULK - 1) ||
+ (i == params->n_flows - 1)) {
+ int status;
+
+ status = app_pipeline_fa_flow_config_bulk(app,
+ params->pipeline_id,
+ flow_id,
+ pos + 1,
+ 1 << params->meter_id,
+ 0,
+ 0,
+ flow_params);
+
+ if (status != 0) {
+ printf("Command failed\n");
+
+ break;
+ }
+ }
+ }
+
+ rte_free(flow_params);
+ rte_free(flow_id);
+
+}
+
+cmdline_parse_token_string_t cmd_fa_meter_config_bulk_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_meter_config_bulk_flows_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ flows_string, "flows");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_bulk_n_flows =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ n_flows, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_meter_config_bulk_meter_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ meter_string, "meter");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_bulk_meter_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ meter_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_meter_config_bulk_trtcm_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ trtcm_string, "trtcm");
+
+cmdline_parse_token_num_t cmd_fa_meter_config_bulk_cir =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ cir, UINT64);
+
+cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pir =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ pir, UINT64);
+
+cmdline_parse_token_num_t cmd_fa_meter_config_bulk_cbs =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ cbs, UINT64);
+
+cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pbs =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
+ pbs, UINT64);
+
+cmdline_parse_inst_t cmd_fa_meter_config_bulk = {
+ .f = cmd_fa_meter_config_bulk_parsed,
+ .data = NULL,
+ .help_str = "Flow meter configuration (multiple flows)",
+ .tokens = {
+ (void *) &cmd_fa_meter_config_bulk_p_string,
+ (void *) &cmd_fa_meter_config_bulk_pipeline_id,
+ (void *) &cmd_fa_meter_config_bulk_flows_string,
+ (void *) &cmd_fa_meter_config_bulk_n_flows,
+ (void *) &cmd_fa_meter_config_bulk_meter_string,
+ (void *) &cmd_fa_meter_config_bulk_meter_id,
+ (void *) &cmd_fa_meter_config_bulk_trtcm_string,
+		(void *) &cmd_fa_meter_config_bulk_cir,
+		(void *) &cmd_fa_meter_config_bulk_pir,
+		(void *) &cmd_fa_meter_config_bulk_cbs,
+		(void *) &cmd_fa_meter_config_bulk_pbs,
+ NULL,
+ },
+};
+
+/*
+ * Flow policer configuration (single flow)
+ *
+ * p <pipeline ID> flow <flow ID> policer <policer ID>
+ * G <action> Y <action> R <action>
+ *
+ * <action> = G (green) | Y (yellow) | R (red) | D (drop)
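+ *
+ * Example (illustrative): keep green and yellow packets, drop red ones:
+ *   p 1 flow 0 policer 0 G G Y Y R D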
+ */
+
+struct cmd_fa_policer_config_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ uint32_t flow_id;
+ cmdline_fixed_string_t policer_string;
+ uint32_t policer_id;
+ cmdline_fixed_string_t green_string;
+ cmdline_fixed_string_t g_action;
+ cmdline_fixed_string_t yellow_string;
+ cmdline_fixed_string_t y_action;
+ cmdline_fixed_string_t red_string;
+ cmdline_fixed_string_t r_action;
+};
+
+static void
+cmd_fa_policer_config_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_policer_config_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fa_flow_params flow_params;
+ int status;
+
+ if (params->policer_id >= PIPELINE_FA_N_TC_MAX) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = string_to_policer_action(params->g_action,
+ &flow_params.p[params->policer_id].action[e_RTE_METER_GREEN]);
+ if (status)
+ printf("Invalid policer green action\n");
+
+ status = string_to_policer_action(params->y_action,
+ &flow_params.p[params->policer_id].action[e_RTE_METER_YELLOW]);
+ if (status)
+ printf("Invalid policer yellow action\n");
+
+ status = string_to_policer_action(params->r_action,
+ &flow_params.p[params->policer_id].action[e_RTE_METER_RED]);
+ if (status)
+ printf("Invalid policer red action\n");
+
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ params->flow_id,
+ 0,
+ 1 << params->policer_id,
+ 0,
+ &flow_params);
+
+ if (status != 0)
+ printf("Command failed\n");
+
+}
+
+cmdline_parse_token_string_t cmd_fa_policer_config_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_policer_config_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_config_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ flow_string, "flow");
+
+cmdline_parse_token_num_t cmd_fa_policer_config_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
+ flow_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_config_policer_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ policer_string, "policer");
+
+cmdline_parse_token_num_t cmd_fa_policer_config_policer_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
+ policer_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_config_green_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ green_string, "G");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_g_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ g_action, "R#Y#G#D");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_yellow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ yellow_string, "Y");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_y_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ y_action, "R#Y#G#D");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_red_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ red_string, "R");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_r_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
+ r_action, "R#Y#G#D");
+
+cmdline_parse_inst_t cmd_fa_policer_config = {
+ .f = cmd_fa_policer_config_parsed,
+ .data = NULL,
+ .help_str = "Flow policer configuration (single flow)",
+ .tokens = {
+ (void *) &cmd_fa_policer_config_p_string,
+ (void *) &cmd_fa_policer_config_pipeline_id,
+ (void *) &cmd_fa_policer_config_flow_string,
+ (void *) &cmd_fa_policer_config_flow_id,
+ (void *) &cmd_fa_policer_config_policer_string,
+ (void *) &cmd_fa_policer_config_policer_id,
+ (void *) &cmd_fa_policer_config_green_string,
+ (void *) &cmd_fa_policer_config_g_action,
+ (void *) &cmd_fa_policer_config_yellow_string,
+ (void *) &cmd_fa_policer_config_y_action,
+ (void *) &cmd_fa_policer_config_red_string,
+ (void *) &cmd_fa_policer_config_r_action,
+ NULL,
+ },
+};
+
+/*
+ * Flow policer configuration (multiple flows)
+ *
+ * p <pipeline ID> flows <n_flows> policer <policer ID>
+ * G <action> Y <action> R <action>
+ *
+ * <action> = G (green) | Y (yellow) | R (red) | D (drop)
+ */
+
+struct cmd_fa_policer_config_bulk_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flows_string;
+ uint32_t n_flows;
+ cmdline_fixed_string_t policer_string;
+ uint32_t policer_id;
+ cmdline_fixed_string_t green_string;
+ cmdline_fixed_string_t g_action;
+ cmdline_fixed_string_t yellow_string;
+ cmdline_fixed_string_t y_action;
+ cmdline_fixed_string_t red_string;
+ cmdline_fixed_string_t r_action;
+};
+
+static void
+cmd_fa_policer_config_bulk_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_policer_config_bulk_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fa_flow_params flow_template, *flow_params;
+ uint32_t *flow_id, i;
+ int status;
+
+ if ((params->n_flows == 0) ||
+ (params->policer_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf("Invalid arguments\n");
+ return;
+ }
+
+ flow_id = (uint32_t *) rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_id == NULL) {
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_params == NULL) {
+ rte_free(flow_id);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ memset(&flow_template, 0, sizeof(flow_template));
+
+ status = string_to_policer_action(params->g_action,
+ &flow_template.p[params->policer_id].action[e_RTE_METER_GREEN]);
+ if (status)
+ printf("Invalid policer green action\n");
+
+ status = string_to_policer_action(params->y_action,
+ &flow_template.p[params->policer_id].action[e_RTE_METER_YELLOW]);
+ if (status)
+ printf("Invalid policer yellow action\n");
+
+ status = string_to_policer_action(params->r_action,
+ &flow_template.p[params->policer_id].action[e_RTE_METER_RED]);
+ if (status)
+ printf("Invalid policer red action\n");
+
+ for (i = 0; i < params->n_flows; i++) {
+ uint32_t pos = i % N_FLOWS_BULK;
+
+ flow_id[pos] = i;
+ memcpy(&flow_params[pos], &flow_template,
+ sizeof(flow_template));
+
+ if ((pos == N_FLOWS_BULK - 1) ||
+ (i == params->n_flows - 1)) {
+ int status;
+
+ status = app_pipeline_fa_flow_config_bulk(app,
+ params->pipeline_id,
+ flow_id,
+ pos + 1,
+ 0,
+ 1 << params->policer_id,
+ 0,
+ flow_params);
+
+ if (status != 0) {
+ printf("Command failed\n");
+
+ break;
+ }
+ }
+ }
+
+ rte_free(flow_params);
+ rte_free(flow_id);
+
+}
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_policer_config_bulk_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_flows_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ flows_string, "flows");
+
+cmdline_parse_token_num_t cmd_fa_policer_config_bulk_n_flows =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ n_flows, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_policer_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ policer_string, "policer");
+
+cmdline_parse_token_num_t cmd_fa_policer_config_bulk_policer_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ policer_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_green_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ green_string, "G");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_g_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ g_action, "R#Y#G#D");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_yellow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ yellow_string, "Y");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_y_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ y_action, "R#Y#G#D");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_red_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ red_string, "R");
+
+cmdline_parse_token_string_t cmd_fa_policer_config_bulk_r_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
+ r_action, "R#Y#G#D");
+
+cmdline_parse_inst_t cmd_fa_policer_config_bulk = {
+ .f = cmd_fa_policer_config_bulk_parsed,
+ .data = NULL,
+ .help_str = "Flow policer configuration (multiple flows)",
+ .tokens = {
+ (void *) &cmd_fa_policer_config_bulk_p_string,
+ (void *) &cmd_fa_policer_config_bulk_pipeline_id,
+ (void *) &cmd_fa_policer_config_bulk_flows_string,
+ (void *) &cmd_fa_policer_config_bulk_n_flows,
+ (void *) &cmd_fa_policer_config_bulk_policer_string,
+ (void *) &cmd_fa_policer_config_bulk_policer_id,
+ (void *) &cmd_fa_policer_config_bulk_green_string,
+ (void *) &cmd_fa_policer_config_bulk_g_action,
+ (void *) &cmd_fa_policer_config_bulk_yellow_string,
+ (void *) &cmd_fa_policer_config_bulk_y_action,
+ (void *) &cmd_fa_policer_config_bulk_red_string,
+ (void *) &cmd_fa_policer_config_bulk_r_action,
+ NULL,
+ },
+};
+
+/*
+ * Flow output port configuration (single flow)
+ *
+ * p <pipeline ID> flow <flow ID> port <port ID>
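+ *
+ * Example (illustrative): send flow 0 to output port 2:
+ *   p 1 flow 0 port 2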
+ */
+
+struct cmd_fa_output_port_config_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ uint32_t flow_id;
+ cmdline_fixed_string_t port_string;
+ uint32_t port_id;
+};
+
+static void
+cmd_fa_output_port_config_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_output_port_config_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fa_flow_params flow_params;
+ int status;
+
+ flow_params.port_id = params->port_id;
+
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ params->flow_id,
+ 0,
+ 0,
+ 1,
+ &flow_params);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fa_output_port_config_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_output_port_config_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_output_port_config_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
+ flow_string, "flow");
+
+cmdline_parse_token_num_t cmd_fa_output_port_config_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
+ flow_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_output_port_config_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
+ port_string, "port");
+
+cmdline_parse_token_num_t cmd_fa_output_port_config_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
+ port_id, UINT32);
+
+cmdline_parse_inst_t cmd_fa_output_port_config = {
+ .f = cmd_fa_output_port_config_parsed,
+ .data = NULL,
+ .help_str = "Flow output port configuration (single flow)",
+ .tokens = {
+ (void *) &cmd_fa_output_port_config_p_string,
+ (void *) &cmd_fa_output_port_config_pipeline_id,
+ (void *) &cmd_fa_output_port_config_flow_string,
+ (void *) &cmd_fa_output_port_config_flow_id,
+ (void *) &cmd_fa_output_port_config_port_string,
+ (void *) &cmd_fa_output_port_config_port_id,
+ NULL,
+ },
+};
+
+/*
+ * Flow output port configuration (multiple flows)
+ *
+ * p <pipeline ID> flows <n_flows> ports <n_ports>
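+ *
+ * Flows are spread over the n_ports output ports round-robin
+ * (flow i is assigned to port i % n_ports).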
+ */
+
+struct cmd_fa_output_port_config_bulk_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flows_string;
+ uint32_t n_flows;
+ cmdline_fixed_string_t ports_string;
+ uint32_t n_ports;
+};
+
+static void
+cmd_fa_output_port_config_bulk_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_output_port_config_bulk_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fa_flow_params *flow_params;
+ uint32_t *flow_id;
+ uint32_t i;
+
+	if ((params->n_flows == 0) || (params->n_ports == 0)) {
+ printf("Invalid arguments\n");
+ return;
+ }
+
+ flow_id = (uint32_t *) rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_id == NULL) {
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_params == NULL) {
+ rte_free(flow_id);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ for (i = 0; i < params->n_flows; i++) {
+ uint32_t pos = i % N_FLOWS_BULK;
+ uint32_t port_id = i % params->n_ports;
+
+ flow_id[pos] = i;
+
+ memset(&flow_params[pos], 0, sizeof(flow_params[pos]));
+ flow_params[pos].port_id = port_id;
+
+ if ((pos == N_FLOWS_BULK - 1) ||
+ (i == params->n_flows - 1)) {
+ int status;
+
+ status = app_pipeline_fa_flow_config_bulk(app,
+ params->pipeline_id,
+ flow_id,
+ pos + 1,
+ 0,
+ 0,
+ 1,
+ flow_params);
+
+ if (status != 0) {
+ printf("Command failed\n");
+
+ break;
+ }
+ }
+ }
+
+ rte_free(flow_params);
+ rte_free(flow_id);
+
+}
+
+cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_flows_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
+ flows_string, "flows");
+
+cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_n_flows =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
+ n_flows, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_ports_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
+ ports_string, "ports");
+
+cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_n_ports =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
+ n_ports, UINT32);
+
+cmdline_parse_inst_t cmd_fa_output_port_config_bulk = {
+ .f = cmd_fa_output_port_config_bulk_parsed,
+ .data = NULL,
+ .help_str = "Flow output port configuration (multiple flows)",
+ .tokens = {
+ (void *) &cmd_fa_output_port_config_bulk_p_string,
+ (void *) &cmd_fa_output_port_config_bulk_pipeline_id,
+ (void *) &cmd_fa_output_port_config_bulk_flows_string,
+ (void *) &cmd_fa_output_port_config_bulk_n_flows,
+ (void *) &cmd_fa_output_port_config_bulk_ports_string,
+ (void *) &cmd_fa_output_port_config_bulk_n_ports,
+ NULL,
+ },
+};
+
+/*
+ * Flow DiffServ Code Point (DSCP) translation table configuration
+ *
+ * p <pipeline ID> dscp <DSCP ID> class <traffic class ID> color <color>
+ *
+ * <color> = G (green) | Y (yellow) | R (red)
+ */
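+
+/*
+ * Example (hypothetical values): "p 1 dscp 46 class 0 color G" maps DSCP
+ * value 46 to traffic class 0 with an initial color of green on pipeline 1.
+ */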
+
+struct cmd_fa_dscp_config_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t dscp_string;
+ uint32_t dscp_id;
+ cmdline_fixed_string_t class_string;
+ uint32_t traffic_class_id;
+ cmdline_fixed_string_t color_string;
+ cmdline_fixed_string_t color;
+};
+
+static void
+cmd_fa_dscp_config_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_dscp_config_result *params = parsed_result;
+ struct app_params *app = data;
+ enum rte_meter_color color;
+ int status;
+
+ status = string_to_color(params->color, &color);
+ if (status) {
+ printf("Invalid color\n");
+ return;
+ }
+
+ status = app_pipeline_fa_dscp_config(app,
+ params->pipeline_id,
+ params->dscp_id,
+ params->traffic_class_id,
+ color);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fa_dscp_config_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_dscp_config_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_dscp_config_dscp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
+ dscp_string, "dscp");
+
+cmdline_parse_token_num_t cmd_fa_dscp_config_dscp_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
+ dscp_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_dscp_config_class_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
+ class_string, "class");
+
+cmdline_parse_token_num_t cmd_fa_dscp_config_traffic_class_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
+ traffic_class_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_dscp_config_color_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
+ color_string, "color");
+
+cmdline_parse_token_string_t cmd_fa_dscp_config_color =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
+ color, "G#Y#R");
+
+cmdline_parse_inst_t cmd_fa_dscp_config = {
+ .f = cmd_fa_dscp_config_parsed,
+ .data = NULL,
+ .help_str = "Flow DSCP translation table configuration",
+ .tokens = {
+ (void *) &cmd_fa_dscp_config_p_string,
+ (void *) &cmd_fa_dscp_config_pipeline_id,
+ (void *) &cmd_fa_dscp_config_dscp_string,
+ (void *) &cmd_fa_dscp_config_dscp_id,
+ (void *) &cmd_fa_dscp_config_class_string,
+ (void *) &cmd_fa_dscp_config_traffic_class_id,
+ (void *) &cmd_fa_dscp_config_color_string,
+ (void *) &cmd_fa_dscp_config_color,
+ NULL,
+ },
+};
+
+/*
+ * Flow policer stats read
+ *
+ * p <pipeline ID> flow <flow ID> policer <policer ID> stats
+ */
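+
+/*
+ * Example (hypothetical values): "p 1 flow 10 policer 0 stats" reads the
+ * per-color packet counters of policer 0 for flow 10 of pipeline 1; the
+ * handler below also clears the counters (it passes clear = 1).
+ */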
+
+struct cmd_fa_policer_stats_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ uint32_t flow_id;
+ cmdline_fixed_string_t policer_string;
+ uint32_t policer_id;
+ cmdline_fixed_string_t stats_string;
+};
+
+static void
+cmd_fa_policer_stats_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_policer_stats_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fa_policer_stats stats;
+ int status;
+
+ status = app_pipeline_fa_flow_policer_stats_read(app,
+ params->pipeline_id,
+ params->flow_id,
+ params->policer_id,
+ 1,
+ &stats);
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("\tPkts G: %" PRIu64
+ "\tPkts Y: %" PRIu64
+ "\tPkts R: %" PRIu64
+ "\tPkts D: %" PRIu64 "\n",
+ stats.n_pkts[e_RTE_METER_GREEN],
+ stats.n_pkts[e_RTE_METER_YELLOW],
+ stats.n_pkts[e_RTE_METER_RED],
+ stats.n_pkts_drop);
+}
+
+cmdline_parse_token_string_t cmd_fa_policer_stats_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_policer_stats_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_stats_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
+ flow_string, "flow");
+
+cmdline_parse_token_num_t cmd_fa_policer_stats_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
+ flow_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_stats_policer_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
+ policer_string, "policer");
+
+cmdline_parse_token_num_t cmd_fa_policer_stats_policer_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
+ policer_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_policer_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
+ stats_string, "stats");
+
+cmdline_parse_inst_t cmd_fa_policer_stats = {
+ .f = cmd_fa_policer_stats_parsed,
+ .data = NULL,
+ .help_str = "Flow policer stats read",
+ .tokens = {
+ (void *) &cmd_fa_policer_stats_p_string,
+ (void *) &cmd_fa_policer_stats_pipeline_id,
+ (void *) &cmd_fa_policer_stats_flow_string,
+ (void *) &cmd_fa_policer_stats_flow_id,
+ (void *) &cmd_fa_policer_stats_policer_string,
+ (void *) &cmd_fa_policer_stats_policer_id,
+ (void *) &cmd_fa_policer_stats_string,
+ NULL,
+ },
+};
+
+/*
+ * Flow actions list
+ *
+ * p <pipeline ID> flow actions ls
+ */
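+
+/*
+ * Example (hypothetical value): "p 1 flow actions ls" lists the flow action
+ * entries (meters, policers, output port) of pipeline 1.
+ */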
+
+struct cmd_fa_flow_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t actions_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_fa_flow_ls_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_flow_ls_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_fa_flow_ls(app, params->pipeline_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fa_flow_ls_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_flow_ls_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_flow_ls_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_flow_ls_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fa_flow_ls_actions_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
+ actions_string, "actions");
+
+cmdline_parse_token_string_t cmd_fa_flow_ls_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
+ ls_string, "ls");
+
+cmdline_parse_inst_t cmd_fa_flow_ls = {
+ .f = cmd_fa_flow_ls_parsed,
+ .data = NULL,
+ .help_str = "Flow actions list",
+ .tokens = {
+ (void *) &cmd_fa_flow_ls_p_string,
+ (void *) &cmd_fa_flow_ls_pipeline_id,
+ (void *) &cmd_fa_flow_ls_flow_string,
+ (void *) &cmd_fa_flow_ls_actions_string,
+ (void *) &cmd_fa_flow_ls_ls_string,
+ NULL,
+ },
+};
+
+/*
+ * Flow DiffServ Code Point (DSCP) translation table list
+ *
+ * p <pipeline ID> dscp ls
+ */
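+
+/*
+ * Example (hypothetical value): "p 1 dscp ls" prints the 64-entry DSCP to
+ * (traffic class, color) translation table of pipeline 1.
+ */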
+
+struct cmd_fa_dscp_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t dscp_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_fa_dscp_ls_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fa_dscp_ls_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_fa_dscp_ls(app, params->pipeline_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fa_dscp_ls_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fa_dscp_ls_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_ls_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fa_dscp_ls_dscp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result,
+ dscp_string, "dscp");
+
+cmdline_parse_token_string_t cmd_fa_dscp_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result, ls_string,
+ "ls");
+
+cmdline_parse_inst_t cmd_fa_dscp_ls = {
+ .f = cmd_fa_dscp_ls_parsed,
+ .data = NULL,
+ .help_str = "Flow DSCP translaton table list",
+ .tokens = {
+ (void *) &cmd_fa_dscp_ls_p_string,
+ (void *) &cmd_fa_dscp_ls_pipeline_id,
+ (void *) &cmd_fa_dscp_ls_dscp_string,
+ (void *) &cmd_fa_dscp_ls_string,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_fa_meter_config,
+ (cmdline_parse_inst_t *) &cmd_fa_meter_config_bulk,
+ (cmdline_parse_inst_t *) &cmd_fa_policer_config,
+ (cmdline_parse_inst_t *) &cmd_fa_policer_config_bulk,
+ (cmdline_parse_inst_t *) &cmd_fa_output_port_config,
+ (cmdline_parse_inst_t *) &cmd_fa_output_port_config_bulk,
+ (cmdline_parse_inst_t *) &cmd_fa_dscp_config,
+ (cmdline_parse_inst_t *) &cmd_fa_policer_stats,
+ (cmdline_parse_inst_t *) &cmd_fa_flow_ls,
+ (cmdline_parse_inst_t *) &cmd_fa_dscp_ls,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_flow_actions_fe_ops = {
+ .f_init = app_pipeline_fa_init,
+ .f_free = app_pipeline_fa_free,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_flow_actions = {
+ .name = "FLOW_ACTIONS",
+ .be_ops = &pipeline_flow_actions_be_ops,
+ .fe_ops = &pipeline_flow_actions_fe_ops,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.h b/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
new file mode 100644
index 00000000..f2cd0cbb
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
@@ -0,0 +1,78 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_ACTIONS_H__
+#define __INCLUDE_PIPELINE_FLOW_ACTIONS_H__
+
+#include <rte_meter.h>
+
+#include "pipeline.h"
+#include "pipeline_flow_actions_be.h"
+
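+/*
+ * Note on the mask parameters below: following the bit-scan loops in the
+ * back-end message handlers, bit i of meter_update_mask and of
+ * policer_update_mask selects traffic class i (0 .. PIPELINE_FA_N_TC_MAX - 1),
+ * while port_update is a plain boolean flag.
+ */
+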
+int
+app_pipeline_fa_flow_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params);
+
+int
+app_pipeline_fa_flow_config_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t *flow_id,
+ uint32_t n_flows,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params);
+
+int
+app_pipeline_fa_dscp_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t dscp,
+ uint32_t traffic_class,
+ enum rte_meter_color color);
+
+int
+app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t policer_id,
+ int clear,
+ struct pipeline_fa_policer_stats *stats);
+
+extern struct pipeline_type pipeline_flow_actions;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
new file mode 100644
index 00000000..3ad3ee63
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
@@ -0,0 +1,1011 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_table_array.h>
+#include <rte_byteorder.h>
+#include <rte_ip.h>
+
+#include "pipeline_actions_common.h"
+#include "pipeline_flow_actions_be.h"
+#include "parser.h"
+#include "hash_func.h"
+
+int
+pipeline_fa_flow_params_set_default(struct pipeline_fa_flow_params *params)
+{
+ uint32_t i;
+
+ if (params == NULL)
+ return -1;
+
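+ /*
+ * Minimal placeholder trTCM parameters (rates of 1 byte/s and bursts of
+ * 1-2 bytes); meaningful values are expected to be installed later via
+ * the flow meter configuration messages.
+ */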
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
+ struct rte_meter_trtcm_params *m = &params->m[i];
+
+ m->cir = 1;
+ m->cbs = 1;
+ m->pir = 1;
+ m->pbs = 2;
+ }
+
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
+ struct pipeline_fa_policer_params *p = &params->p[i];
+ uint32_t j;
+
+ for (j = 0; j < e_RTE_METER_COLORS; j++) {
+ struct pipeline_fa_policer_action *a = &p->action[j];
+
+ a->drop = 0;
+ a->color = (enum rte_meter_color) j;
+ }
+ }
+
+ params->port_id = 0;
+
+ return 0;
+}
+
+struct dscp_entry {
+ uint32_t traffic_class;
+ enum rte_meter_color color;
+};
+
+struct pipeline_flow_actions {
+ struct pipeline p;
+ struct pipeline_fa_params params;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_FA_MSG_REQS];
+
+ struct dscp_entry dscp[PIPELINE_FA_N_DSCP];
+} __rte_cache_aligned;
+
+static void *
+pipeline_fa_msg_req_custom_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_fa_msg_req_custom_handler,
+};
+
+static void *
+pipeline_fa_msg_req_flow_config_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fa_msg_req_flow_config_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fa_msg_req_dscp_config_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fa_msg_req_policer_stats_read_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_FA_MSG_REQ_FLOW_CONFIG] =
+ pipeline_fa_msg_req_flow_config_handler,
+ [PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK] =
+ pipeline_fa_msg_req_flow_config_bulk_handler,
+ [PIPELINE_FA_MSG_REQ_DSCP_CONFIG] =
+ pipeline_fa_msg_req_dscp_config_handler,
+ [PIPELINE_FA_MSG_REQ_POLICER_STATS_READ] =
+ pipeline_fa_msg_req_policer_stats_read_handler,
+};
+
+/*
+ * Flow table
+ */
+struct meter_policer {
+ struct rte_meter_trtcm meter;
+ struct pipeline_fa_policer_params policer;
+ struct pipeline_fa_policer_stats stats;
+};
+
+struct flow_table_entry {
+ struct rte_pipeline_table_entry head;
+ struct meter_policer mp[PIPELINE_FA_N_TC_MAX];
+};
+
+static int
+flow_table_entry_set_meter(struct flow_table_entry *entry,
+ uint32_t meter_id,
+ struct pipeline_fa_flow_params *params)
+{
+ struct rte_meter_trtcm *meter = &entry->mp[meter_id].meter;
+ struct rte_meter_trtcm_params *meter_params = &params->m[meter_id];
+
+ return rte_meter_trtcm_config(meter, meter_params);
+}
+
+static void
+flow_table_entry_set_policer(struct flow_table_entry *entry,
+ uint32_t policer_id,
+ struct pipeline_fa_flow_params *params)
+{
+ struct pipeline_fa_policer_params *p0 = &entry->mp[policer_id].policer;
+ struct pipeline_fa_policer_params *p1 = &params->p[policer_id];
+
+ memcpy(p0, p1, sizeof(*p0));
+}
+
+static void
+flow_table_entry_set_port_id(struct pipeline_flow_actions *p,
+ struct flow_table_entry *entry,
+ struct pipeline_fa_flow_params *params)
+{
+ entry->head.action = RTE_PIPELINE_ACTION_PORT;
+ entry->head.port_id = p->p.port_out_id[params->port_id];
+}
+
+static int
+flow_table_entry_set_default(struct pipeline_flow_actions *p,
+ struct flow_table_entry *entry)
+{
+ struct pipeline_fa_flow_params params;
+ uint32_t i;
+
+ pipeline_fa_flow_params_set_default(&params);
+
+ memset(entry, 0, sizeof(*entry));
+
+ flow_table_entry_set_port_id(p, entry, &params);
+
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
+ int status;
+
+ status = flow_table_entry_set_meter(entry, i, &params);
+ if (status)
+ return status;
+ }
+
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++)
+ flow_table_entry_set_policer(entry, i, &params);
+
+ return 0;
+}
+
+static inline uint64_t
+pkt_work(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ void *arg,
+ uint64_t time)
+{
+ struct pipeline_flow_actions *p = arg;
+ struct flow_table_entry *entry =
+ (struct flow_table_entry *) table_entry;
+
+ struct ipv4_hdr *pkt_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, p->params.ip_hdr_offset);
+ enum rte_meter_color *pkt_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, p->params.color_offset);
+
+ /* Read (IP header) */
+ uint32_t total_length = rte_bswap16(pkt_ip->total_length);
+ uint32_t dscp = pkt_ip->type_of_service >> 2;
+
+ uint32_t tc = p->dscp[dscp].traffic_class;
+ enum rte_meter_color color = p->dscp[dscp].color;
+
+ struct rte_meter_trtcm *meter = &entry->mp[tc].meter;
+ struct pipeline_fa_policer_params *policer = &entry->mp[tc].policer;
+ struct pipeline_fa_policer_stats *stats = &entry->mp[tc].stats;
+
+ /* Read (entry), compute */
+ enum rte_meter_color color2 = rte_meter_trtcm_color_aware_check(meter,
+ time,
+ total_length,
+ color);
+
+ enum rte_meter_color color3 = policer->action[color2].color;
+ uint64_t drop = policer->action[color2].drop;
+
+ /* Read (entry), write (entry, color) */
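+ /*
+ * drop is either 0 or 1, so (drop ^ 1) increments the per-color accept
+ * counter only for packets that are not dropped (branch-free accounting).
+ */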
+ stats->n_pkts[color3] += drop ^ 1LLU;
+ stats->n_pkts_drop += drop;
+ *pkt_color = color3;
+
+ return drop;
+}
+
+static inline uint64_t
+pkt4_work(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ void *arg,
+ uint64_t time)
+{
+ struct pipeline_flow_actions *p = arg;
+
+ struct flow_table_entry *entry0 =
+ (struct flow_table_entry *) table_entries[0];
+ struct flow_table_entry *entry1 =
+ (struct flow_table_entry *) table_entries[1];
+ struct flow_table_entry *entry2 =
+ (struct flow_table_entry *) table_entries[2];
+ struct flow_table_entry *entry3 =
+ (struct flow_table_entry *) table_entries[3];
+
+ struct ipv4_hdr *pkt0_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p->params.ip_hdr_offset);
+ struct ipv4_hdr *pkt1_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p->params.ip_hdr_offset);
+ struct ipv4_hdr *pkt2_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p->params.ip_hdr_offset);
+ struct ipv4_hdr *pkt3_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p->params.ip_hdr_offset);
+
+ enum rte_meter_color *pkt0_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p->params.color_offset);
+ enum rte_meter_color *pkt1_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p->params.color_offset);
+ enum rte_meter_color *pkt2_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p->params.color_offset);
+ enum rte_meter_color *pkt3_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p->params.color_offset);
+
+ /* Read (IP header) */
+ uint32_t total_length0 = rte_bswap16(pkt0_ip->total_length);
+ uint32_t dscp0 = pkt0_ip->type_of_service >> 2;
+
+ uint32_t total_length1 = rte_bswap16(pkt1_ip->total_length);
+ uint32_t dscp1 = pkt1_ip->type_of_service >> 2;
+
+ uint32_t total_length2 = rte_bswap16(pkt2_ip->total_length);
+ uint32_t dscp2 = pkt2_ip->type_of_service >> 2;
+
+ uint32_t total_length3 = rte_bswap16(pkt3_ip->total_length);
+ uint32_t dscp3 = pkt3_ip->type_of_service >> 2;
+
+ uint32_t tc0 = p->dscp[dscp0].traffic_class;
+ enum rte_meter_color color0 = p->dscp[dscp0].color;
+
+ uint32_t tc1 = p->dscp[dscp1].traffic_class;
+ enum rte_meter_color color1 = p->dscp[dscp1].color;
+
+ uint32_t tc2 = p->dscp[dscp2].traffic_class;
+ enum rte_meter_color color2 = p->dscp[dscp2].color;
+
+ uint32_t tc3 = p->dscp[dscp3].traffic_class;
+ enum rte_meter_color color3 = p->dscp[dscp3].color;
+
+ struct rte_meter_trtcm *meter0 = &entry0->mp[tc0].meter;
+ struct pipeline_fa_policer_params *policer0 = &entry0->mp[tc0].policer;
+ struct pipeline_fa_policer_stats *stats0 = &entry0->mp[tc0].stats;
+
+ struct rte_meter_trtcm *meter1 = &entry1->mp[tc1].meter;
+ struct pipeline_fa_policer_params *policer1 = &entry1->mp[tc1].policer;
+ struct pipeline_fa_policer_stats *stats1 = &entry1->mp[tc1].stats;
+
+ struct rte_meter_trtcm *meter2 = &entry2->mp[tc2].meter;
+ struct pipeline_fa_policer_params *policer2 = &entry2->mp[tc2].policer;
+ struct pipeline_fa_policer_stats *stats2 = &entry2->mp[tc2].stats;
+
+ struct rte_meter_trtcm *meter3 = &entry3->mp[tc3].meter;
+ struct pipeline_fa_policer_params *policer3 = &entry3->mp[tc3].policer;
+ struct pipeline_fa_policer_stats *stats3 = &entry3->mp[tc3].stats;
+
+ /* Read (entry), compute, write (entry) */
+ enum rte_meter_color color2_0 = rte_meter_trtcm_color_aware_check(
+ meter0,
+ time,
+ total_length0,
+ color0);
+
+ enum rte_meter_color color2_1 = rte_meter_trtcm_color_aware_check(
+ meter1,
+ time,
+ total_length1,
+ color1);
+
+ enum rte_meter_color color2_2 = rte_meter_trtcm_color_aware_check(
+ meter2,
+ time,
+ total_length2,
+ color2);
+
+ enum rte_meter_color color2_3 = rte_meter_trtcm_color_aware_check(
+ meter3,
+ time,
+ total_length3,
+ color3);
+
+ enum rte_meter_color color3_0 = policer0->action[color2_0].color;
+ enum rte_meter_color color3_1 = policer1->action[color2_1].color;
+ enum rte_meter_color color3_2 = policer2->action[color2_2].color;
+ enum rte_meter_color color3_3 = policer3->action[color2_3].color;
+
+ uint64_t drop0 = policer0->action[color2_0].drop;
+ uint64_t drop1 = policer1->action[color2_1].drop;
+ uint64_t drop2 = policer2->action[color2_2].drop;
+ uint64_t drop3 = policer3->action[color2_3].drop;
+
+ /* Read (entry), write (entry, color) */
+ stats0->n_pkts[color3_0] += drop0 ^ 1LLU;
+ stats0->n_pkts_drop += drop0;
+
+ stats1->n_pkts[color3_1] += drop1 ^ 1LLU;
+ stats1->n_pkts_drop += drop1;
+
+ stats2->n_pkts[color3_2] += drop2 ^ 1LLU;
+ stats2->n_pkts_drop += drop2;
+
+ stats3->n_pkts[color3_3] += drop3 ^ 1LLU;
+ stats3->n_pkts_drop += drop3;
+
+ *pkt0_color = color3_0;
+ *pkt1_color = color3_1;
+ *pkt2_color = color3_2;
+ *pkt3_color = color3_3;
+
+ return drop0 | (drop1 << 1) | (drop2 << 2) | (drop3 << 3);
+}
+
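+/*
+ * PIPELINE_TABLE_AH_HIT_DROP_TIME (from pipeline_actions_common.h) expands
+ * pkt_work/pkt4_work into the table action handler fa_table_ah_hit: the
+ * set bits in the value returned by the work functions select the packets
+ * to drop, and a time stamp is passed through to the trTCM meters.
+ */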
+PIPELINE_TABLE_AH_HIT_DROP_TIME(fa_table_ah_hit, pkt_work, pkt4_work);
+
+static rte_pipeline_table_action_handler_hit
+get_fa_table_ah_hit(__rte_unused struct pipeline_flow_actions *p)
+{
+ return fa_table_ah_hit;
+}
+
+/*
+ * Argument parsing
+ */
+int
+pipeline_fa_parse_args(struct pipeline_fa_params *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_flows_present = 0;
+ uint32_t n_meters_per_flow_present = 0;
+ uint32_t flow_id_offset_present = 0;
+ uint32_t ip_hdr_offset_present = 0;
+ uint32_t color_offset_present = 0;
+ uint32_t i;
+
+ /* Default values */
+ p->n_meters_per_flow = 1;
+ p->dscp_enabled = 0;
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* n_flows */
+ if (strcmp(arg_name, "n_flows") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_flows_present == 0, params->name,
+ arg_name);
+ n_flows_present = 1;
+
+ status = parser_read_uint32(&p->n_flows,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_flows != 0)), params->name,
+ arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* n_meters_per_flow */
+ if (strcmp(arg_name, "n_meters_per_flow") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_meters_per_flow_present == 0,
+ params->name, arg_name);
+ n_meters_per_flow_present = 1;
+
+ status = parser_read_uint32(&p->n_meters_per_flow,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_meters_per_flow != 0)),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
+ (p->n_meters_per_flow <=
+ PIPELINE_FA_N_TC_MAX)), params->name,
+ arg_name, arg_value);
+
+ continue;
+ }
+
+ /* flow_id_offset */
+ if (strcmp(arg_name, "flow_id_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ flow_id_offset_present == 0,
+ params->name, arg_name);
+ flow_id_offset_present = 1;
+
+ status = parser_read_uint32(&p->flow_id_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* ip_hdr_offset */
+ if (strcmp(arg_name, "ip_hdr_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ ip_hdr_offset_present == 0,
+ params->name, arg_name);
+ ip_hdr_offset_present = 1;
+
+ status = parser_read_uint32(&p->ip_hdr_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* color_offset */
+ if (strcmp(arg_name, "color_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ color_offset_present == 0, params->name,
+ arg_name);
+ color_offset_present = 1;
+
+ status = parser_read_uint32(&p->color_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dscp_enabled = 1;
+
+ continue;
+ }
+
+ /* Unknown argument */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check that mandatory arguments are present */
+ PIPELINE_PARSE_ERR_MANDATORY((n_flows_present), params->name,
+ "n_flows");
+ PIPELINE_PARSE_ERR_MANDATORY((flow_id_offset_present),
+ params->name, "flow_id_offset");
+ PIPELINE_PARSE_ERR_MANDATORY((ip_hdr_offset_present),
+ params->name, "ip_hdr_offset");
+ PIPELINE_PARSE_ERR_MANDATORY((color_offset_present), params->name,
+ "color_offset");
+
+ return 0;
+}
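+
+/*
+ * Example (hypothetical) pipeline configuration section consumed by the
+ * parser above:
+ *
+ * [PIPELINE1]
+ * type = FLOW_ACTIONS
+ * n_flows = 65536
+ * n_meters_per_flow = 4
+ * flow_id_offset = 286
+ * ip_hdr_offset = 270
+ * color_offset = 290
+ *
+ * n_flows, flow_id_offset, ip_hdr_offset and color_offset are mandatory;
+ * n_meters_per_flow defaults to 1, and parsing color_offset also sets
+ * dscp_enabled.
+ */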
+
+static void
+dscp_init(struct pipeline_flow_actions *p)
+{
+ uint32_t i;
+
+ for (i = 0; i < PIPELINE_FA_N_DSCP; i++) {
+ p->dscp[i].traffic_class = 0;
+ p->dscp[i].color = e_RTE_METER_GREEN;
+ }
+}
+
+static void *pipeline_fa_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_flow_actions *p_fa;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if (params == NULL)
+ return NULL;
+
+ if (params->n_ports_in != params->n_ports_out)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct pipeline_flow_actions));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+ p_fa = (struct pipeline_flow_actions *) p;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Flow actions");
+
+ /* Parse arguments */
+ if (pipeline_fa_parse_args(&p_fa->params, params)) {
+ rte_free(p);
+ return NULL;
+ }
+
+ dscp_init(p_fa);
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+ struct rte_table_array_params table_array_params = {
+ .n_entries = p_fa->params.n_flows,
+ .offset = p_fa->params.flow_id_offset,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_array_ops,
+ .arg_create = &table_array_params,
+ .f_action_hit = get_fa_table_ah_hit(p_fa),
+ .f_action_miss = NULL,
+ .arg_ah = p_fa,
+ .action_data_size =
+ sizeof(struct flow_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Initialize table entries */
+ for (i = 0; i < p_fa->params.n_flows; i++) {
+ struct rte_table_array_key key = {
+ .pos = i,
+ };
+
+ struct flow_table_entry entry;
+ struct rte_pipeline_table_entry *entry_ptr;
+ int key_found, status;
+
+ flow_table_entry_set_default(p_fa, &entry);
+
+ status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &key_found,
+ &entry_ptr);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_fa->custom_handlers,
+ custom_handlers,
+ sizeof(p_fa->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_fa_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_fa_track(void *pipeline,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_in == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int
+pipeline_fa_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+void *
+pipeline_fa_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa =
+ (struct pipeline_flow_actions *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_FA_MSG_REQS) ?
+ p_fa->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+void *
+pipeline_fa_msg_req_flow_config_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
+ struct pipeline_fa_flow_config_msg_req *req = msg;
+ struct pipeline_fa_flow_config_msg_rsp *rsp = msg;
+ struct flow_table_entry *entry;
+ uint32_t mask, i;
+
+ /* Set flow table entry to default if not configured before */
+ if (req->entry_ptr == NULL) {
+ struct rte_table_array_key key = {
+ .pos = req->flow_id % p_fa->params.n_flows,
+ };
+
+ struct flow_table_entry default_entry;
+
+ int key_found, status;
+
+ flow_table_entry_set_default(p_fa, &default_entry);
+
+ status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ (struct rte_pipeline_table_entry *) &default_entry,
+ &key_found,
+ (struct rte_pipeline_table_entry **) &entry);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+ } else
+ entry = (struct flow_table_entry *) req->entry_ptr;
+
+ /* Meter */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ int status;
+
+ if ((mask & req->meter_update_mask) == 0)
+ continue;
+
+ status = flow_table_entry_set_meter(entry, i, &req->params);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+ }
+
+ /* Policer */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ if ((mask & req->policer_update_mask) == 0)
+ continue;
+
+ flow_table_entry_set_policer(entry, i, &req->params);
+ }
+
+ /* Port */
+ if (req->port_update)
+ flow_table_entry_set_port_id(p_fa, entry, &req->params);
+
+ /* Response */
+ rsp->status = 0;
+ rsp->entry_ptr = (void *) entry;
+ return rsp;
+}
+
+void *
+pipeline_fa_msg_req_flow_config_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
+ struct pipeline_fa_flow_config_bulk_msg_req *req = msg;
+ struct pipeline_fa_flow_config_bulk_msg_rsp *rsp = msg;
+ uint32_t i;
+
+ for (i = 0; i < req->n_flows; i++) {
+ struct flow_table_entry *entry;
+ uint32_t j, mask;
+
+ /* Set flow table entry to default if not configured before */
+ if (req->entry_ptr[i] == NULL) {
+ struct rte_table_array_key key = {
+ .pos = req->flow_id[i] % p_fa->params.n_flows,
+ };
+
+ struct flow_table_entry entry_to_add;
+
+ int key_found, status;
+
+ flow_table_entry_set_default(p_fa, &entry_to_add);
+
+ status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ (struct rte_pipeline_table_entry *) &entry_to_add,
+ &key_found,
+ (struct rte_pipeline_table_entry **) &entry);
+ if (status) {
+ rsp->n_flows = i;
+ return rsp;
+ }
+
+ req->entry_ptr[i] = (void *) entry;
+ } else
+ entry = (struct flow_table_entry *) req->entry_ptr[i];
+
+ /* Meter */
+ for (j = 0, mask = 1;
+ j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ int status;
+
+ if ((mask & req->meter_update_mask) == 0)
+ continue;
+
+ status = flow_table_entry_set_meter(entry,
+ j, &req->params[i]);
+ if (status) {
+ rsp->n_flows = i;
+ return rsp;
+ }
+ }
+
+ /* Policer */
+ for (j = 0, mask = 1;
+ j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ if ((mask & req->policer_update_mask) == 0)
+ continue;
+
+ flow_table_entry_set_policer(entry,
+ j, &req->params[i]);
+ }
+
+ /* Port */
+ if (req->port_update)
+ flow_table_entry_set_port_id(p_fa,
+ entry, &req->params[i]);
+ }
+
+ /* Response */
+ rsp->n_flows = i;
+ return rsp;
+}
+
+void *
+pipeline_fa_msg_req_dscp_config_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
+ struct pipeline_fa_dscp_config_msg_req *req = msg;
+ struct pipeline_fa_dscp_config_msg_rsp *rsp = msg;
+
+ /* Check request */
+ if ((req->dscp >= PIPELINE_FA_N_DSCP) ||
+ (req->traffic_class >= PIPELINE_FA_N_TC_MAX) ||
+ (req->color >= e_RTE_METER_COLORS)) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ p_fa->dscp[req->dscp].traffic_class = req->traffic_class;
+ p_fa->dscp[req->dscp].color = req->color;
+ rsp->status = 0;
+ return rsp;
+}
+
+void *
+pipeline_fa_msg_req_policer_stats_read_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_fa_policer_stats_msg_req *req = msg;
+ struct pipeline_fa_policer_stats_msg_rsp *rsp = msg;
+
+ struct flow_table_entry *entry = req->entry_ptr;
+ uint32_t policer_id = req->policer_id;
+ int clear = req->clear;
+
+ /* Check request */
+ if ((req->entry_ptr == NULL) ||
+ (req->policer_id >= PIPELINE_FA_N_TC_MAX)) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ memcpy(&rsp->stats,
+ &entry->mp[policer_id].stats,
+ sizeof(rsp->stats));
+ if (clear)
+ memset(&entry->mp[policer_id].stats,
+ 0, sizeof(entry->mp[policer_id].stats));
+ rsp->status = 0;
+ return rsp;
+}
+
+struct pipeline_be_ops pipeline_flow_actions_be_ops = {
+ .f_init = pipeline_fa_init,
+ .f_free = pipeline_fa_free,
+ .f_run = NULL,
+ .f_timer = pipeline_fa_timer,
+ .f_track = pipeline_fa_track,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h
new file mode 100644
index 00000000..456f2cca
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h
@@ -0,0 +1,168 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_ACTIONS_BE_H__
+#define __INCLUDE_PIPELINE_FLOW_ACTIONS_BE_H__
+
+#include <rte_meter.h>
+
+#include "pipeline_common_be.h"
+
+#ifndef PIPELINE_FA_N_TC_MAX
+#define PIPELINE_FA_N_TC_MAX 4
+#endif
+
+#define PIPELINE_FA_N_DSCP 64
+
+struct pipeline_fa_params {
+ uint32_t n_flows;
+ uint32_t n_meters_per_flow;
+ uint32_t flow_id_offset;
+ uint32_t ip_hdr_offset;
+ uint32_t color_offset;
+ uint32_t dscp_enabled;
+};
+
+int
+pipeline_fa_parse_args(struct pipeline_fa_params *p,
+ struct pipeline_params *params);
+
+struct pipeline_fa_policer_action {
+ uint32_t drop;
+ enum rte_meter_color color;
+};
+
+struct pipeline_fa_policer_params {
+ struct pipeline_fa_policer_action action[e_RTE_METER_COLORS];
+};
+
+struct pipeline_fa_flow_params {
+ struct rte_meter_trtcm_params m[PIPELINE_FA_N_TC_MAX];
+ struct pipeline_fa_policer_params p[PIPELINE_FA_N_TC_MAX];
+ uint32_t port_id;
+};
+
+int
+pipeline_fa_flow_params_set_default(struct pipeline_fa_flow_params *params);
+
+struct pipeline_fa_policer_stats {
+ uint64_t n_pkts[e_RTE_METER_COLORS];
+ uint64_t n_pkts_drop;
+};
+
+enum pipeline_fa_msg_req_type {
+ PIPELINE_FA_MSG_REQ_FLOW_CONFIG = 0,
+ PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK,
+ PIPELINE_FA_MSG_REQ_DSCP_CONFIG,
+ PIPELINE_FA_MSG_REQ_POLICER_STATS_READ,
+ PIPELINE_FA_MSG_REQS,
+};
+
+/*
+ * MSG FLOW CONFIG
+ */
+struct pipeline_fa_flow_config_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ void *entry_ptr;
+ uint32_t flow_id;
+
+ uint32_t meter_update_mask;
+ uint32_t policer_update_mask;
+ uint32_t port_update;
+ struct pipeline_fa_flow_params params;
+};
+
+struct pipeline_fa_flow_config_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG FLOW CONFIG BULK
+ */
+struct pipeline_fa_flow_config_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ void **entry_ptr;
+ uint32_t *flow_id;
+ uint32_t n_flows;
+
+ uint32_t meter_update_mask;
+ uint32_t policer_update_mask;
+ uint32_t port_update;
+ struct pipeline_fa_flow_params *params;
+};
+
+struct pipeline_fa_flow_config_bulk_msg_rsp {
+ uint32_t n_flows;
+};
+
+/*
+ * MSG DSCP CONFIG
+ */
+struct pipeline_fa_dscp_config_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ uint32_t dscp;
+ uint32_t traffic_class;
+ enum rte_meter_color color;
+};
+
+struct pipeline_fa_dscp_config_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG POLICER STATS READ
+ */
+struct pipeline_fa_policer_stats_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ void *entry_ptr;
+ uint32_t policer_id;
+ int clear;
+};
+
+struct pipeline_fa_policer_stats_msg_rsp {
+ int status;
+ struct pipeline_fa_policer_stats stats;
+};
+
+extern struct pipeline_be_ops pipeline_flow_actions_be_ops;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
new file mode 100644
index 00000000..19215748
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -0,0 +1,2215 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_flow_classification.h"
+#include "hash_func.h"
+
+/*
+ * Key conversion
+ */
+
+struct pkt_key_qinq {
+ uint16_t ethertype_svlan;
+ uint16_t svlan;
+ uint16_t ethertype_cvlan;
+ uint16_t cvlan;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_5tuple {
+ uint8_t ttl;
+ uint8_t proto;
+ uint16_t checksum;
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_5tuple {
+ uint16_t payload_length;
+ uint8_t proto;
+ uint8_t hop_limit;
+ uint8_t ip_src[16];
+ uint8_t ip_dst[16];
+ uint16_t port_src;
+ uint16_t port_dst;
+} __attribute__((__packed__));
+
+static int
+app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
+ uint8_t *key_out,
+ uint32_t *signature)
+{
+ uint8_t buffer[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ void *key_buffer = (key_out) ? key_out : buffer;
+
+ switch (key_in->type) {
+ case FLOW_KEY_QINQ:
+ {
+ struct pkt_key_qinq *qinq = key_buffer;
+
+ qinq->ethertype_svlan = 0;
+ qinq->svlan = rte_bswap16(key_in->key.qinq.svlan);
+ qinq->ethertype_cvlan = 0;
+ qinq->cvlan = rte_bswap16(key_in->key.qinq.cvlan);
+
+ if (signature)
+ *signature = (uint32_t) hash_default_key8(qinq, 8, 0);
+ return 0;
+ }
+
+ case FLOW_KEY_IPV4_5TUPLE:
+ {
+ struct pkt_key_ipv4_5tuple *ipv4 = key_buffer;
+
+ ipv4->ttl = 0;
+ ipv4->proto = key_in->key.ipv4_5tuple.proto;
+ ipv4->checksum = 0;
+ ipv4->ip_src = rte_bswap32(key_in->key.ipv4_5tuple.ip_src);
+ ipv4->ip_dst = rte_bswap32(key_in->key.ipv4_5tuple.ip_dst);
+ ipv4->port_src = rte_bswap16(key_in->key.ipv4_5tuple.port_src);
+ ipv4->port_dst = rte_bswap16(key_in->key.ipv4_5tuple.port_dst);
+
+ if (signature)
+ *signature = (uint32_t) hash_default_key16(ipv4, 16, 0);
+ return 0;
+ }
+
+ case FLOW_KEY_IPV6_5TUPLE:
+ {
+ struct pkt_key_ipv6_5tuple *ipv6 = key_buffer;
+
+ memset(ipv6, 0, 64);
+ ipv6->payload_length = 0;
+ ipv6->proto = key_in->key.ipv6_5tuple.proto;
+ ipv6->hop_limit = 0;
+ memcpy(&ipv6->ip_src, &key_in->key.ipv6_5tuple.ip_src, 16);
+ memcpy(&ipv6->ip_dst, &key_in->key.ipv6_5tuple.ip_dst, 16);
+ ipv6->port_src = rte_bswap16(key_in->key.ipv6_5tuple.port_src);
+ ipv6->port_dst = rte_bswap16(key_in->key.ipv6_5tuple.port_dst);
+
+ if (signature)
+ *signature = (uint32_t) hash_default_key64(ipv6, 64, 0);
+ return 0;
+ }
+
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Flow classification pipeline
+ */
+
+struct app_pipeline_fc_flow {
+ struct pipeline_fc_key key;
+ uint32_t port_id;
+ uint32_t flow_id;
+ uint32_t signature;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_fc_flow) node;
+};
+
+#define N_BUCKETS 65536
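+
+/*
+ * N_BUCKETS must be a power of two: the bucket index is computed below as
+ * signature & (N_BUCKETS - 1).
+ */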
+
+struct app_pipeline_fc {
+ /* Parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+
+ /* Flows */
+ TAILQ_HEAD(, app_pipeline_fc_flow) flows[N_BUCKETS];
+ uint32_t n_flows;
+
+ /* Default flow */
+ uint32_t default_flow_present;
+ uint32_t default_flow_port_id;
+ void *default_flow_entry_ptr;
+};
+
+static struct app_pipeline_fc_flow *
+app_pipeline_fc_flow_find(struct app_pipeline_fc *p,
+ struct pipeline_fc_key *key)
+{
+ struct app_pipeline_fc_flow *f;
+ uint32_t signature, bucket_id;
+
+ app_pipeline_fc_key_convert(key, NULL, &signature);
+ bucket_id = signature & (N_BUCKETS - 1);
+
+ TAILQ_FOREACH(f, &p->flows[bucket_id], node)
+ if ((signature == f->signature) &&
+ (memcmp(key,
+ &f->key,
+ sizeof(struct pipeline_fc_key)) == 0))
+ return f;
+
+ return NULL;
+}
+
+static void*
+app_pipeline_fc_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct app_pipeline_fc *p;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fc));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+
+ for (i = 0; i < N_BUCKETS; i++)
+ TAILQ_INIT(&p->flows[i]);
+ p->n_flows = 0;
+
+ return (void *) p;
+}
+
+static int
+app_pipeline_fc_free(void *pipeline)
+{
+ struct app_pipeline_fc *p = pipeline;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ for (i = 0; i < N_BUCKETS; i++)
+ while (!TAILQ_EMPTY(&p->flows[i])) {
+ struct app_pipeline_fc_flow *flow;
+
+ flow = TAILQ_FIRST(&p->flows[i]);
+ TAILQ_REMOVE(&p->flows[i], flow, node);
+ rte_free(flow);
+ }
+
+ rte_free(p);
+ return 0;
+}
+
+static int
+app_pipeline_fc_key_check(struct pipeline_fc_key *key)
+{
+ switch (key->type) {
+ case FLOW_KEY_QINQ:
+ {
+ uint16_t svlan = key->key.qinq.svlan;
+ uint16_t cvlan = key->key.qinq.cvlan;
+
+ if ((svlan & 0xF000) ||
+ (cvlan & 0xF000))
+ return -1;
+
+ return 0;
+ }
+
+ case FLOW_KEY_IPV4_5TUPLE:
+ return 0;
+
+ case FLOW_KEY_IPV6_5TUPLE:
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+int
+app_pipeline_fc_add(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t port_id,
+ uint32_t flow_id)
+{
+ struct app_pipeline_fc *p;
+ struct app_pipeline_fc_flow *flow;
+
+ struct pipeline_fc_add_msg_req *req;
+ struct pipeline_fc_add_msg_rsp *rsp;
+
+ uint32_t signature;
+ int new_flow;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ if (app_pipeline_fc_key_check(key) != 0)
+ return -1;
+
+ /* Find existing flow or allocate new flow */
+ flow = app_pipeline_fc_flow_find(p, key);
+ new_flow = (flow == NULL);
+ if (flow == NULL) {
+ flow = rte_malloc(NULL, sizeof(*flow), RTE_CACHE_LINE_SIZE);
+
+ if (flow == NULL)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD;
+ app_pipeline_fc_key_convert(key, req->key, &signature);
+ req->port_id = port_id;
+ req->flow_id = flow_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_flow)
+ rte_free(flow);
+ return -1;
+ }
+
+ /* Read response and write flow */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_flow == 0) && (rsp->key_found == 0)) ||
+ ((new_flow == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_flow)
+ rte_free(flow);
+ return -1;
+ }
+
+ memset(&flow->key, 0, sizeof(flow->key));
+ memcpy(&flow->key, key, sizeof(flow->key));
+ flow->port_id = port_id;
+ flow->flow_id = flow_id;
+ flow->signature = signature;
+ flow->entry_ptr = rsp->entry_ptr;
+
+ /* Commit rule */
+ if (new_flow) {
+ uint32_t bucket_id = signature & (N_BUCKETS - 1);
+
+ TAILQ_INSERT_TAIL(&p->flows[bucket_id], flow, node);
+ p->n_flows++;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fc_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t *port_id,
+ uint32_t *flow_id,
+ uint32_t n_keys)
+{
+ struct app_pipeline_fc *p;
+ struct pipeline_fc_add_bulk_msg_req *req;
+ struct pipeline_fc_add_bulk_msg_rsp *rsp;
+
+ struct app_pipeline_fc_flow **flow;
+ uint32_t *signature;
+ int *new_flow;
+ struct pipeline_fc_add_bulk_flow_req *flow_req;
+ struct pipeline_fc_add_bulk_flow_rsp *flow_rsp;
+
+ uint32_t i;
+ int status;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (port_id == NULL) ||
+ (flow_id == NULL) ||
+ (n_keys == 0))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < n_keys; i++)
+ if (port_id[i] >= p->n_ports_out)
+ return -1;
+
+ for (i = 0; i < n_keys; i++)
+ if (app_pipeline_fc_key_check(&key[i]) != 0)
+ return -1;
+
+ /* Memory allocation */
+ flow = rte_malloc(NULL,
+ n_keys * sizeof(struct app_pipeline_fc_flow *),
+ RTE_CACHE_LINE_SIZE);
+ if (flow == NULL)
+ return -1;
+
+ signature = rte_malloc(NULL,
+ n_keys * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (signature == NULL) {
+ rte_free(flow);
+ return -1;
+ }
+
+ new_flow = rte_malloc(
+ NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (new_flow == NULL) {
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ flow_req = rte_malloc(NULL,
+ n_keys * sizeof(struct pipeline_fc_add_bulk_flow_req),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_req == NULL) {
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ flow_rsp = rte_malloc(NULL,
+ n_keys * sizeof(struct pipeline_fc_add_bulk_flow_rsp),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_rsp == NULL) {
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ /* Find existing flow or allocate new flow */
+ for (i = 0; i < n_keys; i++) {
+ flow[i] = app_pipeline_fc_flow_find(p, &key[i]);
+ new_flow[i] = (flow[i] == NULL);
+ if (flow[i] == NULL) {
+ flow[i] = rte_zmalloc(NULL,
+ sizeof(struct app_pipeline_fc_flow),
+ RTE_CACHE_LINE_SIZE);
+
+ if (flow[i] == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < i; j++)
+ if (new_flow[j])
+ rte_free(flow[j]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+ }
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ for (i = 0; i < n_keys; i++)
+ if (new_flow[i])
+ rte_free(flow[i]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ app_pipeline_fc_key_convert(&key[i],
+ flow_req[i].key,
+ &signature[i]);
+ flow_req[i].port_id = port_id[i];
+ flow_req[i].flow_id = flow_id[i];
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK;
+ req->req = flow_req;
+ req->rsp = flow_rsp;
+ req->n_keys = n_keys;
+
+ /* Send request and wait for response */
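+	/* Note: a bulk-specific timeout is used here instead of
+	 * MSG_TIMEOUT_DEFAULT, as a single request may carry up to n_keys
+	 * flows.
+	 */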
+ rsp = app_msg_send_recv(app, pipeline_id, req, 10000);
+ if (rsp == NULL) {
+ for (i = 0; i < n_keys; i++)
+ if (new_flow[i])
+ rte_free(flow[i]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ /* Read response */
+ status = 0;
+
+ for (i = 0; i < rsp->n_keys; i++)
+ if ((flow_rsp[i].entry_ptr == NULL) ||
+ ((new_flow[i] == 0) && (flow_rsp[i].key_found == 0)) ||
+ ((new_flow[i] == 1) && (flow_rsp[i].key_found == 1)))
+ status = -1;
+
+ if (rsp->n_keys < n_keys)
+ status = -1;
+
+ /* Commit flows */
+ for (i = 0; i < rsp->n_keys; i++) {
+ memcpy(&flow[i]->key, &key[i], sizeof(flow[i]->key));
+ flow[i]->port_id = port_id[i];
+ flow[i]->flow_id = flow_id[i];
+ flow[i]->signature = signature[i];
+ flow[i]->entry_ptr = flow_rsp[i].entry_ptr;
+
+ if (new_flow[i]) {
+ uint32_t bucket_id = signature[i] & (N_BUCKETS - 1);
+
+ TAILQ_INSERT_TAIL(&p->flows[bucket_id], flow[i], node);
+ p->n_flows++;
+ }
+ }
+
+ /* Free resources */
+ app_msg_free(app, rsp);
+
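+	/* Release flows allocated above that were not committed because the
+	 * pipeline handled fewer than n_keys entries.
+	 */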
+ for (i = rsp->n_keys; i < n_keys; i++)
+ if (new_flow[i])
+ rte_free(flow[i]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+
+ return status;
+}
+
+int
+app_pipeline_fc_del(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key)
+{
+ struct app_pipeline_fc *p;
+ struct app_pipeline_fc_flow *flow;
+
+ struct pipeline_fc_del_msg_req *req;
+ struct pipeline_fc_del_msg_rsp *rsp;
+
+ uint32_t signature, bucket_id;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ if (app_pipeline_fc_key_check(key) != 0)
+ return -1;
+
+ /* Find rule */
+ flow = app_pipeline_fc_flow_find(p, key);
+ if (flow == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_DEL;
+ app_pipeline_fc_key_convert(key, req->key, &signature);
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove rule */
+ bucket_id = signature & (N_BUCKETS - 1);
+ TAILQ_REMOVE(&p->flows[bucket_id], flow, node);
+ p->n_flows--;
+ rte_free(flow);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fc_add_default(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_fc *p;
+
+ struct pipeline_fc_add_default_msg_req *req;
+ struct pipeline_fc_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write flow */
+ if (rsp->status || (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_flow_port_id = port_id;
+ p->default_flow_entry_ptr = rsp->entry_ptr;
+
+	/* Commit default flow */
+ p->default_flow_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fc_del_default(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fc *p;
+
+ struct pipeline_fc_del_default_msg_req *req;
+ struct pipeline_fc_del_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+		return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+	/* Commit default flow */
+ p->default_flow_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * Flow ls
+ */
+
+static void
+print_fc_qinq_flow(struct app_pipeline_fc_flow *flow)
+{
+ printf("(SVLAN = %" PRIu32 ", "
+ "CVLAN = %" PRIu32 ") => "
+ "Port = %" PRIu32 ", "
+ "Flow ID = %" PRIu32 ", "
+ "(signature = 0x%08" PRIx32 ", "
+ "entry_ptr = %p)\n",
+
+ flow->key.key.qinq.svlan,
+ flow->key.key.qinq.cvlan,
+ flow->port_id,
+ flow->flow_id,
+ flow->signature,
+ flow->entry_ptr);
+}
+
+static void
+print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
+{
+ printf("(SA = %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ", "
+ "DA = %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ", "
+ "SP = %" PRIu32 ", "
+ "DP = %" PRIu32 ", "
+ "Proto = %" PRIu32 ") => "
+ "Port = %" PRIu32 ", "
+ "Flow ID = %" PRIu32 " "
+ "(signature = 0x%08" PRIx32 ", "
+ "entry_ptr = %p)\n",
+
+ (flow->key.key.ipv4_5tuple.ip_src >> 24) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_src >> 16) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_src >> 8) & 0xFF,
+ flow->key.key.ipv4_5tuple.ip_src & 0xFF,
+
+ (flow->key.key.ipv4_5tuple.ip_dst >> 24) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_dst >> 16) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_dst >> 8) & 0xFF,
+ flow->key.key.ipv4_5tuple.ip_dst & 0xFF,
+
+ flow->key.key.ipv4_5tuple.port_src,
+ flow->key.key.ipv4_5tuple.port_dst,
+
+ flow->key.key.ipv4_5tuple.proto,
+
+ flow->port_id,
+ flow->flow_id,
+ flow->signature,
+ flow->entry_ptr);
+}
+
+static void
+print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow)
+{
+ printf("(SA = %02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32 ", "
+ "DA = %02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32 ", "
+ "SP = %" PRIu32 ", "
+ "DP = %" PRIu32 " "
+ "Proto = %" PRIu32 " "
+ "=> Port = %" PRIu32 ", "
+ "Flow ID = %" PRIu32 " "
+ "(signature = 0x%08" PRIx32 ", "
+ "entry_ptr = %p)\n",
+
+ flow->key.key.ipv6_5tuple.ip_src[0],
+ flow->key.key.ipv6_5tuple.ip_src[1],
+ flow->key.key.ipv6_5tuple.ip_src[2],
+ flow->key.key.ipv6_5tuple.ip_src[3],
+ flow->key.key.ipv6_5tuple.ip_src[4],
+ flow->key.key.ipv6_5tuple.ip_src[5],
+ flow->key.key.ipv6_5tuple.ip_src[6],
+ flow->key.key.ipv6_5tuple.ip_src[7],
+ flow->key.key.ipv6_5tuple.ip_src[8],
+ flow->key.key.ipv6_5tuple.ip_src[9],
+ flow->key.key.ipv6_5tuple.ip_src[10],
+ flow->key.key.ipv6_5tuple.ip_src[11],
+ flow->key.key.ipv6_5tuple.ip_src[12],
+ flow->key.key.ipv6_5tuple.ip_src[13],
+ flow->key.key.ipv6_5tuple.ip_src[14],
+ flow->key.key.ipv6_5tuple.ip_src[15],
+
+ flow->key.key.ipv6_5tuple.ip_dst[0],
+ flow->key.key.ipv6_5tuple.ip_dst[1],
+ flow->key.key.ipv6_5tuple.ip_dst[2],
+ flow->key.key.ipv6_5tuple.ip_dst[3],
+ flow->key.key.ipv6_5tuple.ip_dst[4],
+ flow->key.key.ipv6_5tuple.ip_dst[5],
+ flow->key.key.ipv6_5tuple.ip_dst[6],
+ flow->key.key.ipv6_5tuple.ip_dst[7],
+ flow->key.key.ipv6_5tuple.ip_dst[8],
+ flow->key.key.ipv6_5tuple.ip_dst[9],
+ flow->key.key.ipv6_5tuple.ip_dst[10],
+ flow->key.key.ipv6_5tuple.ip_dst[11],
+ flow->key.key.ipv6_5tuple.ip_dst[12],
+ flow->key.key.ipv6_5tuple.ip_dst[13],
+ flow->key.key.ipv6_5tuple.ip_dst[14],
+ flow->key.key.ipv6_5tuple.ip_dst[15],
+
+ flow->key.key.ipv6_5tuple.port_src,
+ flow->key.key.ipv6_5tuple.port_dst,
+
+ flow->key.key.ipv6_5tuple.proto,
+
+ flow->port_id,
+ flow->flow_id,
+ flow->signature,
+ flow->entry_ptr);
+}
+
+static void
+print_fc_flow(struct app_pipeline_fc_flow *flow)
+{
+ switch (flow->key.type) {
+ case FLOW_KEY_QINQ:
+ print_fc_qinq_flow(flow);
+ break;
+
+ case FLOW_KEY_IPV4_5TUPLE:
+ print_fc_ipv4_5tuple_flow(flow);
+ break;
+
+ case FLOW_KEY_IPV6_5TUPLE:
+ print_fc_ipv6_5tuple_flow(flow);
+ break;
+ }
+}
+
+static int
+app_pipeline_fc_ls(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fc *p;
+ struct app_pipeline_fc_flow *flow;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < N_BUCKETS; i++)
+ TAILQ_FOREACH(flow, &p->flows[i], node)
+ print_fc_flow(flow);
+
+ if (p->default_flow_present)
+ printf("Default flow: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_flow_port_id,
+ p->default_flow_entry_ptr);
+ else
+ printf("Default: DROP\n");
+
+ return 0;
+}
+
+/*
+ * flow add qinq
+ */
+
+struct cmd_fc_add_qinq_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t qinq_string;
+ uint16_t svlan;
+ uint16_t cvlan;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t flowid_string;
+ uint32_t flow_id;
+};
+
+static void
+cmd_fc_add_qinq_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_add_qinq_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key key;
+ int status;
+
+ memset(&key, 0, sizeof(key));
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = params->svlan;
+ key.key.qinq.cvlan = params->cvlan;
+
+ status = app_pipeline_fc_add(app,
+ params->pipeline_id,
+ &key,
+ params->port,
+ params->flow_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flow_string,
+ "flow");
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, add_string,
+ "add");
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_qinq_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, qinq_string,
+ "qinq");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_svlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, svlan, UINT16);
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_cvlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, cvlan, UINT16);
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, port_string,
+ "port");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, port, UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_flowid_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flowid_string,
+ "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, flow_id, UINT32);
+
+cmdline_parse_inst_t cmd_fc_add_qinq = {
+ .f = cmd_fc_add_qinq_parsed,
+ .data = NULL,
+ .help_str = "Flow add (Q-in-Q)",
+ .tokens = {
+ (void *) &cmd_fc_add_qinq_p_string,
+ (void *) &cmd_fc_add_qinq_pipeline_id,
+ (void *) &cmd_fc_add_qinq_flow_string,
+ (void *) &cmd_fc_add_qinq_add_string,
+ (void *) &cmd_fc_add_qinq_qinq_string,
+ (void *) &cmd_fc_add_qinq_svlan,
+ (void *) &cmd_fc_add_qinq_cvlan,
+ (void *) &cmd_fc_add_qinq_port_string,
+ (void *) &cmd_fc_add_qinq_port,
+ (void *) &cmd_fc_add_qinq_flowid_string,
+ (void *) &cmd_fc_add_qinq_flow_id,
+ NULL,
+ },
+};
+
+/*
+ * flow add qinq all
+ */
+
+struct cmd_fc_add_qinq_all_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t qinq_string;
+ cmdline_fixed_string_t all_string;
+ uint32_t n_flows;
+ uint32_t n_ports;
+};
+
+#ifndef N_FLOWS_BULK
+#define N_FLOWS_BULK 4096
+#endif
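+
+/* Flows are pushed to the pipeline in batches of N_FLOWS_BULK entries; the
+ * position within the current batch is computed with a bit mask, which
+ * assumes N_FLOWS_BULK is a power of two.
+ */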
+
+static void
+cmd_fc_add_qinq_all_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_add_qinq_all_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key *key;
+ uint32_t *port_id;
+ uint32_t *flow_id;
+ uint32_t id;
+
+ /* Check input arguments */
+ if (params->n_flows == 0) {
+ printf("Invalid number of flows\n");
+ return;
+ }
+
+ if (params->n_ports == 0) {
+ printf("Invalid number of output ports\n");
+ return;
+ }
+
+ /* Memory allocation */
+ key = rte_zmalloc(NULL,
+ N_FLOWS_BULK * sizeof(*key),
+ RTE_CACHE_LINE_SIZE);
+ if (key == NULL) {
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ port_id = rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(*port_id),
+ RTE_CACHE_LINE_SIZE);
+ if (port_id == NULL) {
+ rte_free(key);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ flow_id = rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(*flow_id),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_id == NULL) {
+ rte_free(port_id);
+ rte_free(key);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ /* Flow add */
+ for (id = 0; id < params->n_flows; id++) {
+ uint32_t pos = id & (N_FLOWS_BULK - 1);
+
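+		/* Split the flow counter into the Q-in-Q key: the upper bits
+		 * become the SVLAN, the low 12 bits the CVLAN.
+		 */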
+ key[pos].type = FLOW_KEY_QINQ;
+ key[pos].key.qinq.svlan = id >> 12;
+ key[pos].key.qinq.cvlan = id & 0xFFF;
+
+ port_id[pos] = id % params->n_ports;
+ flow_id[pos] = id;
+
+ if ((pos == N_FLOWS_BULK - 1) ||
+ (id == params->n_flows - 1)) {
+ int status;
+
+ status = app_pipeline_fc_add_bulk(app,
+ params->pipeline_id,
+ key,
+ port_id,
+ flow_id,
+ pos + 1);
+
+ if (status != 0) {
+ printf("Command failed\n");
+
+ break;
+ }
+ }
+ }
+
+ /* Memory free */
+ rte_free(flow_id);
+ rte_free(port_id);
+ rte_free(key);
+}
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_all_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_all_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_all_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, flow_string,
+ "flow");
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_all_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, add_string,
+ "add");
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_all_qinq_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, qinq_string,
+ "qinq");
+
+cmdline_parse_token_string_t cmd_fc_add_qinq_all_all_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, all_string,
+ "all");
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_all_n_flows =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, n_flows,
+ UINT32);
+
+cmdline_parse_token_num_t cmd_fc_add_qinq_all_n_ports =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, n_ports,
+ UINT32);
+
+cmdline_parse_inst_t cmd_fc_add_qinq_all = {
+ .f = cmd_fc_add_qinq_all_parsed,
+ .data = NULL,
+ .help_str = "Flow add all (Q-in-Q)",
+ .tokens = {
+ (void *) &cmd_fc_add_qinq_all_p_string,
+ (void *) &cmd_fc_add_qinq_all_pipeline_id,
+ (void *) &cmd_fc_add_qinq_all_flow_string,
+ (void *) &cmd_fc_add_qinq_all_add_string,
+ (void *) &cmd_fc_add_qinq_all_qinq_string,
+ (void *) &cmd_fc_add_qinq_all_all_string,
+ (void *) &cmd_fc_add_qinq_all_n_flows,
+ (void *) &cmd_fc_add_qinq_all_n_ports,
+ NULL,
+ },
+};
+
+/*
+ * flow add ipv4_5tuple
+ */
+
+struct cmd_fc_add_ipv4_5tuple_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t ipv4_5tuple_string;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t flowid_string;
+ uint32_t flow_id;
+};
+
+static void
+cmd_fc_add_ipv4_5tuple_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_add_ipv4_5tuple_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key key;
+ int status;
+
+ memset(&key, 0, sizeof(key));
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_bswap32(
+ params->ip_src.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_bswap32(
+ params->ip_dst.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.port_src = params->port_src;
+ key.key.ipv4_5tuple.port_dst = params->port_dst;
+ key.key.ipv4_5tuple.proto = params->proto;
+
+ status = app_pipeline_fc_add(app,
+ params->pipeline_id,
+ &key,
+ params->port,
+ params->flow_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
+ add_string, "add");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_ipv4_5tuple_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
+ ipv4_5tuple_string, "ipv4_5tuple");
+
+cmdline_parse_token_ipaddr_t cmd_fc_add_ipv4_5tuple_ip_src =
+ TOKEN_IPV4_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, ip_src);
+
+cmdline_parse_token_ipaddr_t cmd_fc_add_ipv4_5tuple_ip_dst =
+ TOKEN_IPV4_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, ip_dst);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port_src =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_src,
+ UINT16);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port_dst =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_dst,
+ UINT16);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_proto =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, proto,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_string,
+ "port");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flowid_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
+ flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, flow_id,
+ UINT32);
+
+cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
+ .f = cmd_fc_add_ipv4_5tuple_parsed,
+ .data = NULL,
+ .help_str = "Flow add (IPv4 5-tuple)",
+ .tokens = {
+ (void *) &cmd_fc_add_ipv4_5tuple_p_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_pipeline_id,
+ (void *) &cmd_fc_add_ipv4_5tuple_flow_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_add_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_ipv4_5tuple_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_ip_src,
+ (void *) &cmd_fc_add_ipv4_5tuple_ip_dst,
+ (void *) &cmd_fc_add_ipv4_5tuple_port_src,
+ (void *) &cmd_fc_add_ipv4_5tuple_port_dst,
+ (void *) &cmd_fc_add_ipv4_5tuple_proto,
+ (void *) &cmd_fc_add_ipv4_5tuple_port_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_port,
+ (void *) &cmd_fc_add_ipv4_5tuple_flowid_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_flow_id,
+ NULL,
+ },
+};
+
+/*
+ * flow add ipv4_5tuple all
+ */
+
+struct cmd_fc_add_ipv4_5tuple_all_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t ipv4_5tuple_string;
+ cmdline_fixed_string_t all_string;
+ uint32_t n_flows;
+ uint32_t n_ports;
+};
+
+static void
+cmd_fc_add_ipv4_5tuple_all_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_add_ipv4_5tuple_all_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key *key;
+ uint32_t *port_id;
+ uint32_t *flow_id;
+ uint32_t id;
+
+ /* Check input parameters */
+ if (params->n_flows == 0) {
+ printf("Invalid number of flows\n");
+ return;
+ }
+
+ if (params->n_ports == 0) {
+		printf("Invalid number of output ports\n");
+ return;
+ }
+
+ /* Memory allocation */
+ key = rte_zmalloc(NULL,
+ N_FLOWS_BULK * sizeof(*key),
+ RTE_CACHE_LINE_SIZE);
+ if (key == NULL) {
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ port_id = rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(*port_id),
+ RTE_CACHE_LINE_SIZE);
+ if (port_id == NULL) {
+ rte_free(key);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ flow_id = rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(*flow_id),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_id == NULL) {
+ rte_free(port_id);
+ rte_free(key);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ /* Flow add */
+ for (id = 0; id < params->n_flows; id++) {
+ uint32_t pos = id & (N_FLOWS_BULK - 1);
+
+ key[pos].type = FLOW_KEY_IPV4_5TUPLE;
+ key[pos].key.ipv4_5tuple.ip_src = 0;
+ key[pos].key.ipv4_5tuple.ip_dst = id;
+ key[pos].key.ipv4_5tuple.port_src = 0;
+ key[pos].key.ipv4_5tuple.port_dst = 0;
+ key[pos].key.ipv4_5tuple.proto = 6;
+
+ port_id[pos] = id % params->n_ports;
+ flow_id[pos] = id;
+
+ if ((pos == N_FLOWS_BULK - 1) ||
+ (id == params->n_flows - 1)) {
+ int status;
+
+ status = app_pipeline_fc_add_bulk(app,
+ params->pipeline_id,
+ key,
+ port_id,
+ flow_id,
+ pos + 1);
+
+ if (status != 0) {
+ printf("Command failed\n");
+
+ break;
+ }
+ }
+ }
+
+ /* Memory free */
+ rte_free(flow_id);
+ rte_free(port_id);
+ rte_free(key);
+}
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ add_string, "add");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_ipv4_5tuple_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ ipv4_5tuple_string, "ipv4_5tuple");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_all_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ all_string, "all");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_n_flows =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ n_flows, UINT32);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_n_ports =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
+ n_ports, UINT32);
+
+cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple_all = {
+ .f = cmd_fc_add_ipv4_5tuple_all_parsed,
+ .data = NULL,
+ .help_str = "Flow add all (IPv4 5-tuple)",
+ .tokens = {
+ (void *) &cmd_fc_add_ipv4_5tuple_all_p_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_all_pipeline_id,
+ (void *) &cmd_fc_add_ipv4_5tuple_all_flow_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_all_add_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_all_ipv4_5tuple_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_all_all_string,
+ (void *) &cmd_fc_add_ipv4_5tuple_all_n_flows,
+ (void *) &cmd_fc_add_ipv4_5tuple_all_n_ports,
+ NULL,
+ },
+};
+
+/*
+ * flow add ipv6_5tuple
+ */
+
+struct cmd_fc_add_ipv6_5tuple_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t ipv6_5tuple_string;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t flowid_string;
+ uint32_t flow_id;
+};
+
+static void
+cmd_fc_add_ipv6_5tuple_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_add_ipv6_5tuple_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key key;
+ int status;
+
+ memset(&key, 0, sizeof(key));
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src,
+ params->ip_src.addr.ipv6.s6_addr,
+ 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst,
+ params->ip_dst.addr.ipv6.s6_addr,
+ 16);
+ key.key.ipv6_5tuple.port_src = params->port_src;
+ key.key.ipv6_5tuple.port_dst = params->port_dst;
+ key.key.ipv6_5tuple.proto = params->proto;
+
+ status = app_pipeline_fc_add(app,
+ params->pipeline_id,
+ &key,
+ params->port,
+ params->flow_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+ add_string, "add");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_ipv6_5tuple_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+ ipv6_5tuple_string, "ipv6_5tuple");
+
+cmdline_parse_token_ipaddr_t cmd_fc_add_ipv6_5tuple_ip_src =
+ TOKEN_IPV6_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, ip_src);
+
+cmdline_parse_token_ipaddr_t cmd_fc_add_ipv6_5tuple_ip_dst =
+ TOKEN_IPV6_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, ip_dst);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port_src =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port_src,
+ UINT16);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port_dst =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port_dst,
+ UINT16);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_proto =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, proto,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+ port_string, "port");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flowid_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
+ flowid_string, "flowid");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, flow_id,
+ UINT32);
+
+cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
+ .f = cmd_fc_add_ipv6_5tuple_parsed,
+ .data = NULL,
+ .help_str = "Flow add (IPv6 5-tuple)",
+ .tokens = {
+ (void *) &cmd_fc_add_ipv6_5tuple_p_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_pipeline_id,
+ (void *) &cmd_fc_add_ipv6_5tuple_flow_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_add_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_ipv6_5tuple_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_ip_src,
+ (void *) &cmd_fc_add_ipv6_5tuple_ip_dst,
+ (void *) &cmd_fc_add_ipv6_5tuple_port_src,
+ (void *) &cmd_fc_add_ipv6_5tuple_port_dst,
+ (void *) &cmd_fc_add_ipv6_5tuple_proto,
+ (void *) &cmd_fc_add_ipv6_5tuple_port_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_port,
+ (void *) &cmd_fc_add_ipv6_5tuple_flowid_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_flow_id,
+ NULL,
+ },
+};
+
+/*
+ * flow add ipv6_5tuple all
+ */
+
+struct cmd_fc_add_ipv6_5tuple_all_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t ipv6_5tuple_string;
+ cmdline_fixed_string_t all_string;
+ uint32_t n_flows;
+ uint32_t n_ports;
+};
+
+static void
+cmd_fc_add_ipv6_5tuple_all_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_add_ipv6_5tuple_all_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key *key;
+ uint32_t *port_id;
+ uint32_t *flow_id;
+ uint32_t id;
+
+ /* Check input parameters */
+ if (params->n_flows == 0) {
+ printf("Invalid number of flows\n");
+ return;
+ }
+
+ if (params->n_ports == 0) {
+		printf("Invalid number of output ports\n");
+ return;
+ }
+
+ /* Memory allocation */
+ key = rte_zmalloc(NULL,
+ N_FLOWS_BULK * sizeof(*key),
+ RTE_CACHE_LINE_SIZE);
+ if (key == NULL) {
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ port_id = rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(*port_id),
+ RTE_CACHE_LINE_SIZE);
+ if (port_id == NULL) {
+ rte_free(key);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ flow_id = rte_malloc(NULL,
+ N_FLOWS_BULK * sizeof(*flow_id),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_id == NULL) {
+ rte_free(port_id);
+ rte_free(key);
+ printf("Memory allocation failed\n");
+ return;
+ }
+
+ /* Flow add */
+ for (id = 0; id < params->n_flows; id++) {
+ uint32_t pos = id & (N_FLOWS_BULK - 1);
+ uint32_t *x;
+
+ key[pos].type = FLOW_KEY_IPV6_5TUPLE;
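+		/* Vary only the first four bytes of the IPv6 destination
+		 * address by overwriting them with the byte-swapped flow
+		 * counter.
+		 */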
+ x = (uint32_t *) key[pos].key.ipv6_5tuple.ip_dst;
+ *x = rte_bswap32(id);
+ key[pos].key.ipv6_5tuple.proto = 6;
+
+ port_id[pos] = id % params->n_ports;
+ flow_id[pos] = id;
+
+ if ((pos == N_FLOWS_BULK - 1) ||
+ (id == params->n_flows - 1)) {
+ int status;
+
+ status = app_pipeline_fc_add_bulk(app,
+ params->pipeline_id,
+ key,
+ port_id,
+ flow_id,
+ pos + 1);
+
+ if (status != 0) {
+ printf("Command failed\n");
+
+ break;
+ }
+ }
+ }
+
+ /* Memory free */
+ rte_free(flow_id);
+ rte_free(port_id);
+ rte_free(key);
+}
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ add_string, "add");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_ipv6_5tuple_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ ipv6_5tuple_string, "ipv6_5tuple");
+
+cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_all_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ all_string, "all");
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_n_flows =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ n_flows, UINT32);
+
+cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_n_ports =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
+ n_ports, UINT32);
+
+cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple_all = {
+ .f = cmd_fc_add_ipv6_5tuple_all_parsed,
+ .data = NULL,
+	.help_str = "Flow add all (IPv6 5-tuple)",
+ .tokens = {
+ (void *) &cmd_fc_add_ipv6_5tuple_all_p_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_all_pipeline_id,
+ (void *) &cmd_fc_add_ipv6_5tuple_all_flow_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_all_add_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_all_ipv6_5tuple_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_all_all_string,
+ (void *) &cmd_fc_add_ipv6_5tuple_all_n_flows,
+ (void *) &cmd_fc_add_ipv6_5tuple_all_n_ports,
+ NULL,
+ },
+};
+
+/*
+ * flow del qinq
+ */
+struct cmd_fc_del_qinq_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t qinq_string;
+ uint16_t svlan;
+ uint16_t cvlan;
+};
+
+static void
+cmd_fc_del_qinq_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_del_qinq_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key key;
+ int status;
+
+ memset(&key, 0, sizeof(key));
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = params->svlan;
+ key.key.qinq.cvlan = params->cvlan;
+ status = app_pipeline_fc_del(app, params->pipeline_id, &key);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_del_qinq_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_del_qinq_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_del_qinq_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, flow_string,
+ "flow");
+
+cmdline_parse_token_string_t cmd_fc_del_qinq_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, del_string,
+ "del");
+
+cmdline_parse_token_string_t cmd_fc_del_qinq_qinq_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, qinq_string,
+ "qinq");
+
+cmdline_parse_token_num_t cmd_fc_del_qinq_svlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, svlan, UINT16);
+
+cmdline_parse_token_num_t cmd_fc_del_qinq_cvlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, cvlan, UINT16);
+
+cmdline_parse_inst_t cmd_fc_del_qinq = {
+ .f = cmd_fc_del_qinq_parsed,
+ .data = NULL,
+ .help_str = "Flow delete (Q-in-Q)",
+ .tokens = {
+ (void *) &cmd_fc_del_qinq_p_string,
+ (void *) &cmd_fc_del_qinq_pipeline_id,
+ (void *) &cmd_fc_del_qinq_flow_string,
+ (void *) &cmd_fc_del_qinq_del_string,
+ (void *) &cmd_fc_del_qinq_qinq_string,
+ (void *) &cmd_fc_del_qinq_svlan,
+ (void *) &cmd_fc_del_qinq_cvlan,
+ NULL,
+ },
+};
+
+/*
+ * flow del ipv4_5tuple
+ */
+
+struct cmd_fc_del_ipv4_5tuple_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t ipv4_5tuple_string;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+};
+
+static void
+cmd_fc_del_ipv4_5tuple_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_del_ipv4_5tuple_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key key;
+ int status;
+
+ memset(&key, 0, sizeof(key));
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_bswap32(
+ params->ip_src.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_bswap32(
+ params->ip_dst.addr.ipv4.s_addr);
+ key.key.ipv4_5tuple.port_src = params->port_src;
+ key.key.ipv4_5tuple.port_dst = params->port_dst;
+ key.key.ipv4_5tuple.proto = params->proto;
+
+ status = app_pipeline_fc_del(app, params->pipeline_id, &key);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ del_string, "del");
+
+cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_ipv4_5tuple_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ ipv4_5tuple_string, "ipv4_5tuple");
+
+cmdline_parse_token_ipaddr_t cmd_fc_del_ipv4_5tuple_ip_src =
+ TOKEN_IPV4_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ ip_src);
+
+cmdline_parse_token_ipaddr_t cmd_fc_del_ipv4_5tuple_ip_dst =
+ TOKEN_IPV4_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result, ip_dst);
+
+cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_port_src =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ port_src, UINT16);
+
+cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_port_dst =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ port_dst, UINT16);
+
+cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_proto =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
+ proto, UINT32);
+
+cmdline_parse_inst_t cmd_fc_del_ipv4_5tuple = {
+ .f = cmd_fc_del_ipv4_5tuple_parsed,
+ .data = NULL,
+ .help_str = "Flow delete (IPv4 5-tuple)",
+ .tokens = {
+ (void *) &cmd_fc_del_ipv4_5tuple_p_string,
+ (void *) &cmd_fc_del_ipv4_5tuple_pipeline_id,
+ (void *) &cmd_fc_del_ipv4_5tuple_flow_string,
+ (void *) &cmd_fc_del_ipv4_5tuple_del_string,
+ (void *) &cmd_fc_del_ipv4_5tuple_ipv4_5tuple_string,
+ (void *) &cmd_fc_del_ipv4_5tuple_ip_src,
+ (void *) &cmd_fc_del_ipv4_5tuple_ip_dst,
+ (void *) &cmd_fc_del_ipv4_5tuple_port_src,
+ (void *) &cmd_fc_del_ipv4_5tuple_port_dst,
+ (void *) &cmd_fc_del_ipv4_5tuple_proto,
+ NULL,
+ },
+};
+
+/*
+ * flow del ipv6_5tuple
+ */
+
+struct cmd_fc_del_ipv6_5tuple_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t ipv6_5tuple_string;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+};
+
+static void
+cmd_fc_del_ipv6_5tuple_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_del_ipv6_5tuple_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_fc_key key;
+ int status;
+
+ memset(&key, 0, sizeof(key));
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src,
+ params->ip_src.addr.ipv6.s6_addr,
+ 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst,
+ params->ip_dst.addr.ipv6.s6_addr,
+ 16);
+ key.key.ipv6_5tuple.port_src = params->port_src;
+ key.key.ipv6_5tuple.port_dst = params->port_dst;
+ key.key.ipv6_5tuple.proto = params->proto;
+
+ status = app_pipeline_fc_del(app, params->pipeline_id, &key);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
+ p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
+ pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
+ del_string, "del");
+
+cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_ipv6_5tuple_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
+ ipv6_5tuple_string, "ipv6_5tuple");
+
+cmdline_parse_token_ipaddr_t cmd_fc_del_ipv6_5tuple_ip_src =
+ TOKEN_IPV6_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, ip_src);
+
+cmdline_parse_token_ipaddr_t cmd_fc_del_ipv6_5tuple_ip_dst =
+ TOKEN_IPV6_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, ip_dst);
+
+cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_port_src =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, port_src,
+ UINT16);
+
+cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_port_dst =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, port_dst,
+ UINT16);
+
+cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_proto =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, proto,
+ UINT32);
+
+cmdline_parse_inst_t cmd_fc_del_ipv6_5tuple = {
+ .f = cmd_fc_del_ipv6_5tuple_parsed,
+ .data = NULL,
+ .help_str = "Flow delete (IPv6 5-tuple)",
+ .tokens = {
+ (void *) &cmd_fc_del_ipv6_5tuple_p_string,
+ (void *) &cmd_fc_del_ipv6_5tuple_pipeline_id,
+ (void *) &cmd_fc_del_ipv6_5tuple_flow_string,
+ (void *) &cmd_fc_del_ipv6_5tuple_del_string,
+ (void *) &cmd_fc_del_ipv6_5tuple_ipv6_5tuple_string,
+ (void *) &cmd_fc_del_ipv6_5tuple_ip_src,
+ (void *) &cmd_fc_del_ipv6_5tuple_ip_dst,
+ (void *) &cmd_fc_del_ipv6_5tuple_port_src,
+ (void *) &cmd_fc_del_ipv6_5tuple_port_dst,
+ (void *) &cmd_fc_del_ipv6_5tuple_proto,
+ NULL,
+ },
+};
+
+/*
+ * flow add default
+ */
+
+struct cmd_fc_add_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t default_string;
+ uint32_t port;
+};
+
+static void
+cmd_fc_add_default_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_add_default_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_fc_add_default(app, params->pipeline_id,
+ params->port);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_add_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_fc_add_default_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_default_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_add_default_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, flow_string,
+ "flow");
+
+cmdline_parse_token_string_t cmd_fc_add_default_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, add_string,
+ "add");
+
+cmdline_parse_token_string_t cmd_fc_add_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result,
+ default_string, "default");
+
+cmdline_parse_token_num_t cmd_fc_add_default_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_add_default_result, port, UINT32);
+
+cmdline_parse_inst_t cmd_fc_add_default = {
+ .f = cmd_fc_add_default_parsed,
+ .data = NULL,
+ .help_str = "Flow add default",
+ .tokens = {
+ (void *) &cmd_fc_add_default_p_string,
+ (void *) &cmd_fc_add_default_pipeline_id,
+ (void *) &cmd_fc_add_default_flow_string,
+ (void *) &cmd_fc_add_default_add_string,
+ (void *) &cmd_fc_add_default_default_string,
+ (void *) &cmd_fc_add_default_port,
+ NULL,
+ },
+};
+
+/*
+ * flow del default
+ */
+
+struct cmd_fc_del_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t default_string;
+};
+
+static void
+cmd_fc_del_default_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_del_default_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_fc_del_default(app, params->pipeline_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_del_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_fc_del_default_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_del_default_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_fc_del_default_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, flow_string,
+ "flow");
+
+cmdline_parse_token_string_t cmd_fc_del_default_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, del_string,
+ "del");
+
+cmdline_parse_token_string_t cmd_fc_del_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result,
+ default_string, "default");
+
+cmdline_parse_inst_t cmd_fc_del_default = {
+ .f = cmd_fc_del_default_parsed,
+ .data = NULL,
+ .help_str = "Flow delete default",
+ .tokens = {
+ (void *) &cmd_fc_del_default_p_string,
+ (void *) &cmd_fc_del_default_pipeline_id,
+ (void *) &cmd_fc_del_default_flow_string,
+ (void *) &cmd_fc_del_default_del_string,
+ (void *) &cmd_fc_del_default_default_string,
+ NULL,
+ },
+};
+
+/*
+ * flow ls
+ */
+
+struct cmd_fc_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_fc_ls_parsed(
+ void *parsed_result,
+	__rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_fc_ls_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_fc_ls(app, params->pipeline_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_fc_ls_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result, p_string, "p");
+
+cmdline_parse_token_num_t cmd_fc_ls_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_fc_ls_result, pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_fc_ls_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result,
+ flow_string, "flow");
+
+cmdline_parse_token_string_t cmd_fc_ls_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result, ls_string,
+ "ls");
+
+cmdline_parse_inst_t cmd_fc_ls = {
+ .f = cmd_fc_ls_parsed,
+ .data = NULL,
+ .help_str = "Flow list",
+ .tokens = {
+ (void *) &cmd_fc_ls_p_string,
+ (void *) &cmd_fc_ls_pipeline_id,
+ (void *) &cmd_fc_ls_flow_string,
+ (void *) &cmd_fc_ls_ls_string,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_fc_add_qinq,
+ (cmdline_parse_inst_t *) &cmd_fc_add_ipv4_5tuple,
+ (cmdline_parse_inst_t *) &cmd_fc_add_ipv6_5tuple,
+
+ (cmdline_parse_inst_t *) &cmd_fc_del_qinq,
+ (cmdline_parse_inst_t *) &cmd_fc_del_ipv4_5tuple,
+ (cmdline_parse_inst_t *) &cmd_fc_del_ipv6_5tuple,
+
+ (cmdline_parse_inst_t *) &cmd_fc_add_default,
+ (cmdline_parse_inst_t *) &cmd_fc_del_default,
+
+ (cmdline_parse_inst_t *) &cmd_fc_add_qinq_all,
+ (cmdline_parse_inst_t *) &cmd_fc_add_ipv4_5tuple_all,
+ (cmdline_parse_inst_t *) &cmd_fc_add_ipv6_5tuple_all,
+
+ (cmdline_parse_inst_t *) &cmd_fc_ls,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_flow_classification_fe_ops = {
+ .f_init = app_pipeline_fc_init,
+ .f_free = app_pipeline_fc_free,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_flow_classification = {
+ .name = "FLOW_CLASSIFICATION",
+ .be_ops = &pipeline_flow_classification_be_ops,
+ .fe_ops = &pipeline_flow_classification_fe_ops,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
new file mode 100644
index 00000000..9c775006
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_H__
+#define __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_H__
+
+#include "pipeline.h"
+#include "pipeline_flow_classification_be.h"
+
+enum flow_key_type {
+ FLOW_KEY_QINQ,
+ FLOW_KEY_IPV4_5TUPLE,
+ FLOW_KEY_IPV6_5TUPLE,
+};
+
+struct flow_key_qinq {
+ uint16_t svlan;
+ uint16_t cvlan;
+};
+
+struct flow_key_ipv4_5tuple {
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+};
+
+struct flow_key_ipv6_5tuple {
+ uint8_t ip_src[16];
+ uint8_t ip_dst[16];
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+};
+
+struct pipeline_fc_key {
+ enum flow_key_type type;
+ union {
+ struct flow_key_qinq qinq;
+ struct flow_key_ipv4_5tuple ipv4_5tuple;
+ struct flow_key_ipv6_5tuple ipv6_5tuple;
+ } key;
+};
+
+int
+app_pipeline_fc_add(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t port_id,
+ uint32_t flow_id);
+
+int
+app_pipeline_fc_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t *port_id,
+ uint32_t *flow_id,
+ uint32_t n_keys);
+
+int
+app_pipeline_fc_del(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key);
+
+int
+app_pipeline_fc_add_default(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_fc_del_default(struct app_params *app,
+ uint32_t pipeline_id);
+
+extern struct pipeline_type pipeline_flow_classification;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
new file mode 100644
index 00000000..70d976d5
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -0,0 +1,811 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_table_hash.h>
+#include <rte_byteorder.h>
+#include <pipeline.h>
+
+#include "pipeline_flow_classification_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+
+struct pipeline_flow_classification {
+ struct pipeline p;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_FC_MSG_REQS];
+
+ uint32_t n_flows;
+ uint32_t key_size;
+ uint32_t flow_id;
+
+ uint32_t key_offset;
+ uint32_t hash_offset;
+ uint8_t key_mask[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ uint32_t key_mask_present;
+ uint32_t flow_id_offset;
+
+} __rte_cache_aligned;
+
+static void *
+pipeline_fc_msg_req_custom_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_fc_msg_req_custom_handler,
+};
+
+static void *
+pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_del_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_del_default_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_FC_MSG_REQ_FLOW_ADD] =
+ pipeline_fc_msg_req_add_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK] =
+ pipeline_fc_msg_req_add_bulk_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_DEL] =
+ pipeline_fc_msg_req_del_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT] =
+ pipeline_fc_msg_req_add_default_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT] =
+ pipeline_fc_msg_req_del_default_handler,
+};
+
+/*
+ * Flow table
+ */
+struct flow_table_entry {
+ struct rte_pipeline_table_entry head;
+
+ uint32_t flow_id;
+ uint32_t pad;
+};
+
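+/* Hash functions indexed by key size: entry i handles keys of
+ * (i + 1) * 8 bytes.
+ */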
+rte_table_hash_op_hash hash_func[] = {
+ hash_default_key8,
+ hash_default_key16,
+ hash_default_key24,
+ hash_default_key32,
+ hash_default_key40,
+ hash_default_key48,
+ hash_default_key56,
+ hash_default_key64
+};
+
+/*
+ * Flow table AH - Write flow_id to packet meta-data
+ */
+static inline void
+pkt_work_flow_id(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ void *arg)
+{
+ struct pipeline_flow_classification *p_fc = arg;
+ uint32_t *flow_id_ptr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, p_fc->flow_id_offset);
+ struct flow_table_entry *entry =
+ (struct flow_table_entry *) table_entry;
+
+ /* Read */
+ uint32_t flow_id = entry->flow_id;
+
+ /* Compute */
+
+ /* Write */
+ *flow_id_ptr = flow_id;
+}
+
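+/* Four-packet variant of the same action; the table action handler built
+ * below is expected to dispatch to it when packets can be processed in
+ * groups of four.
+ */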
+static inline void
+pkt4_work_flow_id(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ void *arg)
+{
+ struct pipeline_flow_classification *p_fc = arg;
+
+ uint32_t *flow_id_ptr0 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p_fc->flow_id_offset);
+ uint32_t *flow_id_ptr1 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p_fc->flow_id_offset);
+ uint32_t *flow_id_ptr2 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p_fc->flow_id_offset);
+ uint32_t *flow_id_ptr3 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p_fc->flow_id_offset);
+
+ struct flow_table_entry *entry0 =
+ (struct flow_table_entry *) table_entries[0];
+ struct flow_table_entry *entry1 =
+ (struct flow_table_entry *) table_entries[1];
+ struct flow_table_entry *entry2 =
+ (struct flow_table_entry *) table_entries[2];
+ struct flow_table_entry *entry3 =
+ (struct flow_table_entry *) table_entries[3];
+
+ /* Read */
+ uint32_t flow_id0 = entry0->flow_id;
+ uint32_t flow_id1 = entry1->flow_id;
+ uint32_t flow_id2 = entry2->flow_id;
+ uint32_t flow_id3 = entry3->flow_id;
+
+ /* Compute */
+
+ /* Write */
+ *flow_id_ptr0 = flow_id0;
+ *flow_id_ptr1 = flow_id1;
+ *flow_id_ptr2 = flow_id2;
+ *flow_id_ptr3 = flow_id3;
+}
+
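+/* Table hit action handler, installed only when a flow_id_offset is
+ * configured (see get_fc_table_ah_hit() below) */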
+PIPELINE_TABLE_AH_HIT(fc_table_ah_hit,
+ pkt_work_flow_id, pkt4_work_flow_id);
+
+static rte_pipeline_table_action_handler_hit
+get_fc_table_ah_hit(struct pipeline_flow_classification *p)
+{
+ if (p->flow_id)
+ return fc_table_ah_hit;
+
+ return NULL;
+}
+
+/*
+ * Argument parsing
+ */
+static int
+pipeline_fc_parse_args(struct pipeline_flow_classification *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_flows_present = 0;
+ uint32_t key_offset_present = 0;
+ uint32_t key_size_present = 0;
+ uint32_t hash_offset_present = 0;
+ uint32_t key_mask_present = 0;
+ uint32_t flow_id_offset_present = 0;
+
+ uint32_t i;
+ char key_mask_str[PIPELINE_FC_FLOW_KEY_MAX_SIZE * 2];
+
+ p->hash_offset = 0;
+
+ /* default values */
+ p->flow_id = 0;
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* n_flows */
+ if (strcmp(arg_name, "n_flows") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_flows_present == 0, params->name,
+ arg_name);
+ n_flows_present = 1;
+
+ status = parser_read_uint32(&p->n_flows,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_flows != 0)), params->name,
+ arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* key_offset */
+ if (strcmp(arg_name, "key_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ key_offset_present == 0, params->name,
+ arg_name);
+ key_offset_present = 1;
+
+ status = parser_read_uint32(&p->key_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* key_size */
+ if (strcmp(arg_name, "key_size") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ key_size_present == 0, params->name,
+ arg_name);
+ key_size_present = 1;
+
+ status = parser_read_uint32(&p->key_size,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->key_size != 0) &&
+ (p->key_size % 8 == 0)),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
+ (p->key_size <=
+ PIPELINE_FC_FLOW_KEY_MAX_SIZE)),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* key_mask */
+ if (strcmp(arg_name, "key_mask") == 0) {
+ int mask_str_len = strlen(arg_value);
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ key_mask_present == 0,
+ params->name, arg_name);
+ key_mask_present = 1;
+
+ PIPELINE_ARG_CHECK((mask_str_len <
+ (PIPELINE_FC_FLOW_KEY_MAX_SIZE * 2)),
+ "Parse error in section \"%s\": entry "
+ "\"%s\" is too long", params->name,
+ arg_name);
+
+ snprintf(key_mask_str, sizeof(key_mask_str), "%s",
+ arg_value);
+
+ continue;
+ }
+
+ /* hash_offset */
+ if (strcmp(arg_name, "hash_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ hash_offset_present == 0, params->name,
+ arg_name);
+ hash_offset_present = 1;
+
+ status = parser_read_uint32(&p->hash_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* flow_id_offset */
+ if (strcmp(arg_name, "flowid_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ flow_id_offset_present == 0, params->name,
+ arg_name);
+ flow_id_offset_present = 1;
+
+ status = parser_read_uint32(&p->flow_id_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->flow_id = 1;
+
+ continue;
+ }
+
+ /* Unknown argument */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check that mandatory arguments are present */
+ PIPELINE_PARSE_ERR_MANDATORY((n_flows_present), params->name,
+ "n_flows");
+ PIPELINE_PARSE_ERR_MANDATORY((key_offset_present), params->name,
+ "key_offset");
+ PIPELINE_PARSE_ERR_MANDATORY((key_size_present), params->name,
+ "key_size");
+
+ if (key_mask_present) {
+ uint32_t key_size = p->key_size;
+ int status;
+
+ PIPELINE_ARG_CHECK(((key_size == 8) || (key_size == 16)),
+ "Parse error in section \"%s\": entry key_mask "
+ "only allowed for key_size of 8 or 16 bytes",
+ params->name);
+
+ PIPELINE_ARG_CHECK((strlen(key_mask_str) ==
+ (key_size * 2)), "Parse error in section "
+ "\"%s\": key_mask should have exactly %u hex "
+ "digits", params->name, (key_size * 2));
+
+ PIPELINE_ARG_CHECK((hash_offset_present == 0), "Parse "
+ "error in section \"%s\": entry hash_offset only "
+ "allowed when key_mask is not present",
+ params->name);
+
+ status = parse_hex_string(key_mask_str, p->key_mask,
+ &p->key_size);
+
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (key_size == p->key_size)), params->name,
+ "key_mask", key_mask_str);
+ }
+
+ p->key_mask_present = key_mask_present;
+
+ return 0;
+}
+
+static void *pipeline_fc_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_flow_classification *p_fc;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if (params == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct pipeline_flow_classification));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+ p_fc = (struct pipeline_flow_classification *) p;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Flow classification");
+
+ /* Parse arguments */
+ if (pipeline_fc_parse_args(p_fc, params)) {
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+ struct rte_table_hash_key8_ext_params
+ table_hash_key8_params = {
+ .n_entries = p_fc->n_flows,
+ .n_entries_ext = p_fc->n_flows,
+ .signature_offset = p_fc->hash_offset,
+ .key_offset = p_fc->key_offset,
+ .f_hash = hash_func[(p_fc->key_size / 8) - 1],
+ .key_mask = (p_fc->key_mask_present) ?
+ p_fc->key_mask : NULL,
+ .seed = 0,
+ };
+
+ struct rte_table_hash_key16_ext_params
+ table_hash_key16_params = {
+ .n_entries = p_fc->n_flows,
+ .n_entries_ext = p_fc->n_flows,
+ .signature_offset = p_fc->hash_offset,
+ .key_offset = p_fc->key_offset,
+ .f_hash = hash_func[(p_fc->key_size / 8) - 1],
+ .key_mask = (p_fc->key_mask_present) ?
+ p_fc->key_mask : NULL,
+ .seed = 0,
+ };
+
+ struct rte_table_hash_ext_params
+ table_hash_params = {
+ .key_size = p_fc->key_size,
+ .n_keys = p_fc->n_flows,
+ .n_buckets = p_fc->n_flows / 4,
+ .n_buckets_ext = p_fc->n_flows / 4,
+ .f_hash = hash_func[(p_fc->key_size / 8) - 1],
+ .seed = 0,
+ .signature_offset = p_fc->hash_offset,
+ .key_offset = p_fc->key_offset,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = NULL, /* set below */
+ .arg_create = NULL, /* set below */
+ .f_action_hit = get_fc_table_ah_hit(p_fc),
+ .f_action_miss = NULL,
+ .arg_ah = p_fc,
+ .action_data_size = sizeof(struct flow_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
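+ /* Pick the hash table flavor: the key8/key16 tables are specialized
+ * for those key sizes, and their "dosig" variants compute the hash
+ * signature at lookup time when no hash_offset is configured; any
+ * other key size uses the generic extendable hash table */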
+ switch (p_fc->key_size) {
+ case 8:
+ if (p_fc->hash_offset != 0) {
+ table_params.ops =
+ &rte_table_hash_key8_ext_ops;
+ } else {
+ table_params.ops =
+ &rte_table_hash_key8_ext_dosig_ops;
+ }
+ table_params.arg_create = &table_hash_key8_params;
+ break;
+
+ case 16:
+ if (p_fc->hash_offset != 0) {
+ table_params.ops =
+ &rte_table_hash_key16_ext_ops;
+ } else {
+ table_params.ops =
+ &rte_table_hash_key16_ext_dosig_ops;
+ }
+ table_params.arg_create = &table_hash_key16_params;
+ break;
+
+ default:
+ table_params.ops = &rte_table_hash_ext_ops;
+ table_params.arg_create = &table_hash_params;
+ }
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_fc->custom_handlers,
+ custom_handlers,
+ sizeof(p_fc->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_fc_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_fc_track(void *pipeline,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_in == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int
+pipeline_fc_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+static void *
+pipeline_fc_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_classification *p_fc =
+ (struct pipeline_flow_classification *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_FC_MSG_REQS) ?
+ p_fc->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+static void *
+pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_add_msg_req *req = msg;
+ struct pipeline_fc_add_msg_rsp *rsp = msg;
+
+ struct flow_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+ .flow_id = req->flow_id,
+ };
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &req->key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_add_bulk_msg_req *req = msg;
+ struct pipeline_fc_add_bulk_msg_rsp *rsp = msg;
+ uint32_t i;
+
+ for (i = 0; i < req->n_keys; i++) {
+ struct pipeline_fc_add_bulk_flow_req *flow_req = &req->req[i];
+ struct pipeline_fc_add_bulk_flow_rsp *flow_rsp = &req->rsp[i];
+
+ struct flow_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[flow_req->port_id]},
+ },
+ .flow_id = flow_req->flow_id,
+ };
+
+ int status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &flow_req->key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &flow_rsp->key_found,
+ (struct rte_pipeline_table_entry **)
+ &flow_rsp->entry_ptr);
+
+ if (status)
+ break;
+ }
+
+ rsp->n_keys = i;
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_del_msg_req *req = msg;
+ struct pipeline_fc_del_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[0],
+ &req->key,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_add_default_msg_req *req = msg;
+ struct pipeline_fc_add_default_msg_rsp *rsp = msg;
+
+ struct flow_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+
+ .flow_id = 0,
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[0],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_del_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_del_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[0],
+ NULL);
+
+ return rsp;
+}
+
+struct pipeline_be_ops pipeline_flow_classification_be_ops = {
+ .f_init = pipeline_fc_init,
+ .f_free = pipeline_fc_free,
+ .f_run = NULL,
+ .f_timer = pipeline_fc_timer,
+ .f_track = pipeline_fc_track,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
new file mode 100644
index 00000000..d8129b21
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
@@ -0,0 +1,142 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_BE_H__
+#define __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_BE_H__
+
+#include "pipeline_common_be.h"
+
+enum pipeline_fc_msg_req_type {
+ PIPELINE_FC_MSG_REQ_FLOW_ADD = 0,
+ PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK,
+ PIPELINE_FC_MSG_REQ_FLOW_DEL,
+ PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT,
+ PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT,
+ PIPELINE_FC_MSG_REQS,
+};
+
+#ifndef PIPELINE_FC_FLOW_KEY_MAX_SIZE
+#define PIPELINE_FC_FLOW_KEY_MAX_SIZE 64
+#endif
+
+/*
+ * MSG ADD
+ */
+struct pipeline_fc_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+
+ uint32_t port_id;
+ uint32_t flow_id;
+};
+
+struct pipeline_fc_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ADD BULK
+ */
+struct pipeline_fc_add_bulk_flow_req {
+ uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ uint32_t port_id;
+ uint32_t flow_id;
+};
+
+struct pipeline_fc_add_bulk_flow_rsp {
+ int key_found;
+ void *entry_ptr;
+};
+
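+/* req and rsp point to caller-provided arrays of n_keys elements */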
+struct pipeline_fc_add_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ struct pipeline_fc_add_bulk_flow_req *req;
+ struct pipeline_fc_add_bulk_flow_rsp *rsp;
+ uint32_t n_keys;
+};
+
+struct pipeline_fc_add_bulk_msg_rsp {
+ uint32_t n_keys;
+};
+
+/*
+ * MSG DEL
+ */
+struct pipeline_fc_del_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+};
+
+struct pipeline_fc_del_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ADD DEFAULT
+ */
+struct pipeline_fc_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ uint32_t port_id;
+};
+
+struct pipeline_fc_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG DEL DEFAULT
+ */
+struct pipeline_fc_del_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+};
+
+struct pipeline_fc_del_default_msg_rsp {
+ int status;
+};
+
+extern struct pipeline_be_ops pipeline_flow_classification_be_ops;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_master.c b/examples/ip_pipeline/pipeline/pipeline_master.c
new file mode 100644
index 00000000..1ccdad14
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_master.c
@@ -0,0 +1,47 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pipeline_master.h"
+#include "pipeline_master_be.h"
+
+static struct pipeline_fe_ops pipeline_master_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = NULL,
+};
+
+struct pipeline_type pipeline_master = {
+ .name = "MASTER",
+ .be_ops = &pipeline_master_be_ops,
+ .fe_ops = &pipeline_master_fe_ops,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master.h b/examples/ip_pipeline/pipeline/pipeline_master.h
new file mode 100644
index 00000000..3fe3030f
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_master.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_MASTER_H__
+#define __INCLUDE_PIPELINE_MASTER_H__
+
+#include "pipeline.h"
+
+extern struct pipeline_type pipeline_master;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_master_be.c b/examples/ip_pipeline/pipeline/pipeline_master_be.c
new file mode 100644
index 00000000..ac0cbbc5
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_master_be.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "app.h"
+#include "pipeline_master_be.h"
+
+struct pipeline_master {
+ struct app_params *app;
+ struct cmdline *cl;
+ int script_file_done;
+} __rte_cache_aligned;
+
+static void*
+pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
+{
+ struct app_params *app = (struct app_params *) arg;
+ struct pipeline_master *p;
+ uint32_t size;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_master));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->app = app;
+
+ p->cl = cmdline_stdin_new(app->cmds, "pipeline> ");
+ if (p->cl == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+
+ p->script_file_done = 0;
+ if (app->script_file == NULL)
+ p->script_file_done = 1;
+
+ return (void *) p;
+}
+
+static int
+pipeline_free(void *pipeline)
+{
+ struct pipeline_master *p = (struct pipeline_master *) pipeline;
+
+ if (p == NULL)
+ return -EINVAL;
+
+ cmdline_stdin_exit(p->cl);
+ rte_free(p);
+
+ return 0;
+}
+
+static int
+pipeline_run(void *pipeline)
+{
+ struct pipeline_master *p = (struct pipeline_master *) pipeline;
+ int status;
+
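+ /* On the first invocation, run the optional CLI script file once,
+ * then poll the interactive command line */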
+ if (p->script_file_done == 0) {
+ struct app_params *app = p->app;
+ int fd = open(app->script_file, O_RDONLY);
+
+ if (fd < 0)
+ printf("Cannot open CLI script file \"%s\"\n",
+ app->script_file);
+ else {
+ struct cmdline *file_cl;
+
+ printf("Running CLI script file \"%s\" ...\n",
+ app->script_file);
+ file_cl = cmdline_new(p->cl->ctx, "", fd, 1);
+ cmdline_interact(file_cl);
+ close(fd);
+ }
+
+ p->script_file_done = 1;
+ }
+
+ status = cmdline_poll(p->cl);
+ if (status < 0)
+ rte_panic("CLI poll error (%" PRId32 ")\n", status);
+ else if (status == RDLINE_EXITED) {
+ cmdline_stdin_exit(p->cl);
+ rte_exit(0, "Bye!\n");
+ }
+
+ return 0;
+}
+
+static int
+pipeline_timer(__rte_unused void *pipeline)
+{
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_master_be_ops = {
+ .f_init = pipeline_init,
+ .f_free = pipeline_free,
+ .f_run = pipeline_run,
+ .f_timer = pipeline_timer,
+ .f_track = NULL,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master_be.h b/examples/ip_pipeline/pipeline/pipeline_master_be.h
new file mode 100644
index 00000000..00b71fe8
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_master_be.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_MASTER_BE_H__
+#define __INCLUDE_PIPELINE_MASTER_BE_H__
+
+#include "pipeline_common_be.h"
+
+extern struct pipeline_be_ops pipeline_master_be_ops;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough.c b/examples/ip_pipeline/pipeline/pipeline_passthrough.c
new file mode 100644
index 00000000..fc2cae5e
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough.c
@@ -0,0 +1,47 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pipeline_passthrough.h"
+#include "pipeline_passthrough_be.h"
+
+static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = NULL,
+};
+
+struct pipeline_type pipeline_passthrough = {
+ .name = "PASS-THROUGH",
+ .be_ops = &pipeline_passthrough_be_ops,
+ .fe_ops = &pipeline_passthrough_fe_ops,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough.h b/examples/ip_pipeline/pipeline/pipeline_passthrough.h
new file mode 100644
index 00000000..420a8768
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_H__
+#define __INCLUDE_PIPELINE_PASSTHROUGH_H__
+
+#include "pipeline.h"
+
+extern struct pipeline_type pipeline_passthrough;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
new file mode 100644
index 00000000..a0d11aea
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
@@ -0,0 +1,804 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_table_stub.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_passthrough_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+
+struct pipeline_passthrough {
+ struct pipeline p;
+ struct pipeline_passthrough_params params;
+ rte_table_hash_op_hash f_hash;
+} __rte_cache_aligned;
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_msg_req_invalid_handler,
+};
+
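+/*
+ * Per-packet work: copy dma_size bytes of packet meta-data from
+ * dma_src_offset to dma_dst_offset while applying dma_src_mask, then
+ * optionally hash the result to store it at dma_hash_offset and/or to
+ * load balance the packet across the output ports.
+ */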
+static inline __attribute__((always_inline)) void
+pkt_work(
+ struct rte_mbuf *pkt,
+ void *arg,
+ uint32_t dma_size,
+ uint32_t hash_enabled,
+ uint32_t lb_hash,
+ uint32_t port_out_pow2)
+{
+ struct pipeline_passthrough *p = arg;
+
+ uint64_t *dma_dst = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->params.dma_dst_offset);
+ uint64_t *dma_src = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->params.dma_src_offset);
+ uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
+ uint32_t *dma_hash = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ p->params.dma_hash_offset);
+ uint32_t i;
+
+ /* Read (dma_src), compute (dma_dst), write (dma_dst) */
+ for (i = 0; i < (dma_size / 8); i++)
+ dma_dst[i] = dma_src[i] & dma_mask[i];
+
+ /* Read (dma_dst), compute (hash), write (hash) */
+ if (hash_enabled) {
+ uint32_t hash = p->f_hash(dma_dst, dma_size, 0);
+ *dma_hash = hash;
+
+ if (lb_hash) {
+ uint32_t port_out;
+
+ if (port_out_pow2)
+ port_out
+ = hash & (p->p.n_ports_out - 1);
+ else
+ port_out
+ = hash % p->p.n_ports_out;
+
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out, pkt);
+ }
+ }
+}
+
+static inline __attribute__((always_inline)) void
+pkt4_work(
+ struct rte_mbuf **pkts,
+ void *arg,
+ uint32_t dma_size,
+ uint32_t hash_enabled,
+ uint32_t lb_hash,
+ uint32_t port_out_pow2)
+{
+ struct pipeline_passthrough *p = arg;
+
+ uint64_t *dma_dst0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->params.dma_dst_offset);
+
+ uint64_t *dma_src0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->params.dma_src_offset);
+ uint64_t *dma_src1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->params.dma_src_offset);
+ uint64_t *dma_src2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->params.dma_src_offset);
+ uint64_t *dma_src3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->params.dma_src_offset);
+
+ uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
+
+ uint32_t *dma_hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkts[0],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkts[1],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkts[2],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkts[3],
+ p->params.dma_hash_offset);
+
+ uint32_t i;
+
+ /* Read (dma_src), compute (dma_dst), write (dma_dst) */
+ for (i = 0; i < (dma_size / 8); i++) {
+ dma_dst0[i] = dma_src0[i] & dma_mask[i];
+ dma_dst1[i] = dma_src1[i] & dma_mask[i];
+ dma_dst2[i] = dma_src2[i] & dma_mask[i];
+ dma_dst3[i] = dma_src3[i] & dma_mask[i];
+ }
+
+ /* Read (dma_dst), compute (hash), write (hash) */
+ if (hash_enabled) {
+ uint32_t hash0 = p->f_hash(dma_dst0, dma_size, 0);
+ uint32_t hash1 = p->f_hash(dma_dst1, dma_size, 0);
+ uint32_t hash2 = p->f_hash(dma_dst2, dma_size, 0);
+ uint32_t hash3 = p->f_hash(dma_dst3, dma_size, 0);
+
+ *dma_hash0 = hash0;
+ *dma_hash1 = hash1;
+ *dma_hash2 = hash2;
+ *dma_hash3 = hash3;
+
+ if (lb_hash) {
+ uint32_t port_out0, port_out1, port_out2, port_out3;
+
+ if (port_out_pow2) {
+ port_out0
+ = hash0 & (p->p.n_ports_out - 1);
+ port_out1
+ = hash1 & (p->p.n_ports_out - 1);
+ port_out2
+ = hash2 & (p->p.n_ports_out - 1);
+ port_out3
+ = hash3 & (p->p.n_ports_out - 1);
+ } else {
+ port_out0
+ = hash0 % p->p.n_ports_out;
+ port_out1
+ = hash1 % p->p.n_ports_out;
+ port_out2
+ = hash2 % p->p.n_ports_out;
+ port_out3
+ = hash3 % p->p.n_ports_out;
+ }
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out0, pkts[0]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out1, pkts[1]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out2, pkts[2]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out3, pkts[3]);
+ }
+ }
+}
+
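+/*
+ * The macros below generate always-inline specializations of pkt_work() and
+ * pkt4_work() for every (dma_size, hash_enabled, lb_hash, port_pow2)
+ * combination, so these parameters are resolved at compile time instead of
+ * being branched on per packet.
+ */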
+#define PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+static inline void \
+pkt_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2( \
+ struct rte_mbuf *pkt, \
+ void *arg) \
+{ \
+ pkt_work(pkt, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
+}
+
+#define PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+static inline void \
+pkt4_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2( \
+ struct rte_mbuf **pkts, \
+ void *arg) \
+{ \
+ pkt4_work(pkts, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
+}
+
+#define port_in_ah(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PIPELINE_PORT_IN_AH(port_in_ah_size##dma_size##_hash \
+ ##hash_enabled##_lb##lb_hash##_pw##port_pow2, \
+ pkt_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt4_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2)
+
+
+#define port_in_ah_lb(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PIPELINE_PORT_IN_AH_HIJACK_ALL( \
+ port_in_ah_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt4_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2)
+
+/* Port in AH (dma_size, hash_enabled, lb_hash, port_pow2) */
+
+port_in_ah(8, 0, 0, 0)
+port_in_ah(8, 1, 0, 0)
+port_in_ah_lb(8, 1, 1, 0)
+port_in_ah_lb(8, 1, 1, 1)
+
+port_in_ah(16, 0, 0, 0)
+port_in_ah(16, 1, 0, 0)
+port_in_ah_lb(16, 1, 1, 0)
+port_in_ah_lb(16, 1, 1, 1)
+
+port_in_ah(24, 0, 0, 0)
+port_in_ah(24, 1, 0, 0)
+port_in_ah_lb(24, 1, 1, 0)
+port_in_ah_lb(24, 1, 1, 1)
+
+port_in_ah(32, 0, 0, 0)
+port_in_ah(32, 1, 0, 0)
+port_in_ah_lb(32, 1, 1, 0)
+port_in_ah_lb(32, 1, 1, 1)
+
+port_in_ah(40, 0, 0, 0)
+port_in_ah(40, 1, 0, 0)
+port_in_ah_lb(40, 1, 1, 0)
+port_in_ah_lb(40, 1, 1, 1)
+
+port_in_ah(48, 0, 0, 0)
+port_in_ah(48, 1, 0, 0)
+port_in_ah_lb(48, 1, 1, 0)
+port_in_ah_lb(48, 1, 1, 1)
+
+port_in_ah(56, 0, 0, 0)
+port_in_ah(56, 1, 0, 0)
+port_in_ah_lb(56, 1, 1, 0)
+port_in_ah_lb(56, 1, 1, 1)
+
+port_in_ah(64, 0, 0, 0)
+port_in_ah(64, 1, 0, 0)
+port_in_ah_lb(64, 1, 1, 0)
+port_in_ah_lb(64, 1, 1, 1)
+
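+/* Select the port-in action handler specialization matching the parsed
+ * parameters; returns NULL (no action handler) when DMA is disabled */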
+static rte_pipeline_port_in_action_handler
+get_port_in_ah(struct pipeline_passthrough *p)
+{
+ if (p->params.dma_enabled == 0)
+ return NULL;
+
+ if (p->params.dma_hash_enabled) {
+ if (p->params.lb_hash_enabled) {
+ if (rte_is_power_of_2(p->p.n_ports_out))
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash1_lb1_pw1;
+ case 16: return port_in_ah_size16_hash1_lb1_pw1;
+ case 24: return port_in_ah_size24_hash1_lb1_pw1;
+ case 32: return port_in_ah_size32_hash1_lb1_pw1;
+ case 40: return port_in_ah_size40_hash1_lb1_pw1;
+ case 48: return port_in_ah_size48_hash1_lb1_pw1;
+ case 56: return port_in_ah_size56_hash1_lb1_pw1;
+ case 64: return port_in_ah_size64_hash1_lb1_pw1;
+ default: return NULL;
+ }
+ else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash1_lb1_pw0;
+ case 16: return port_in_ah_size16_hash1_lb1_pw0;
+ case 24: return port_in_ah_size24_hash1_lb1_pw0;
+ case 32: return port_in_ah_size32_hash1_lb1_pw0;
+ case 40: return port_in_ah_size40_hash1_lb1_pw0;
+ case 48: return port_in_ah_size48_hash1_lb1_pw0;
+ case 56: return port_in_ah_size56_hash1_lb1_pw0;
+ case 64: return port_in_ah_size64_hash1_lb1_pw0;
+ default: return NULL;
+ }
+ } else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash1_lb0_pw0;
+ case 16: return port_in_ah_size16_hash1_lb0_pw0;
+ case 24: return port_in_ah_size24_hash1_lb0_pw0;
+ case 32: return port_in_ah_size32_hash1_lb0_pw0;
+ case 40: return port_in_ah_size40_hash1_lb0_pw0;
+ case 48: return port_in_ah_size48_hash1_lb0_pw0;
+ case 56: return port_in_ah_size56_hash1_lb0_pw0;
+ case 64: return port_in_ah_size64_hash1_lb0_pw0;
+ default: return NULL;
+ }
+ } else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash0_lb0_pw0;
+ case 16: return port_in_ah_size16_hash0_lb0_pw0;
+ case 24: return port_in_ah_size24_hash0_lb0_pw0;
+ case 32: return port_in_ah_size32_hash0_lb0_pw0;
+ case 40: return port_in_ah_size40_hash0_lb0_pw0;
+ case 48: return port_in_ah_size48_hash0_lb0_pw0;
+ case 56: return port_in_ah_size56_hash0_lb0_pw0;
+ case 64: return port_in_ah_size64_hash0_lb0_pw0;
+ default: return NULL;
+ }
+}
+
+int
+pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
+ struct pipeline_params *params)
+{
+ uint32_t dma_dst_offset_present = 0;
+ uint32_t dma_src_offset_present = 0;
+ uint32_t dma_src_mask_present = 0;
+ uint32_t dma_size_present = 0;
+ uint32_t dma_hash_offset_present = 0;
+ uint32_t lb_present = 0;
+ uint32_t i;
+ char dma_mask_str[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2];
+
+ /* default values */
+ p->dma_enabled = 0;
+ p->dma_hash_enabled = 0;
+ p->lb_hash_enabled = 0;
+ memset(p->dma_src_mask, 0xFF, sizeof(p->dma_src_mask));
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* dma_dst_offset */
+ if (strcmp(arg_name, "dma_dst_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_dst_offset_present == 0, params->name,
+ arg_name);
+ dma_dst_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_dst_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_src_offset */
+ if (strcmp(arg_name, "dma_src_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_src_offset_present == 0, params->name,
+ arg_name);
+ dma_src_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_src_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_size */
+ if (strcmp(arg_name, "dma_size") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_size_present == 0, params->name,
+ arg_name);
+ dma_size_present = 1;
+
+ status = parser_read_uint32(&p->dma_size,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->dma_size != 0) &&
+ ((p->dma_size % 8) == 0)),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
+ (p->dma_size <=
+ PIPELINE_PASSTHROUGH_DMA_SIZE_MAX)),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_src_mask */
+ if (strcmp(arg_name, "dma_src_mask") == 0) {
+ int mask_str_len = strlen(arg_value);
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_src_mask_present == 0,
+ params->name, arg_name);
+ dma_src_mask_present = 1;
+
+ PIPELINE_ARG_CHECK((mask_str_len <
+ (PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2)),
+ "Parse error in section \"%s\": entry "
+ "\"%s\" too long", params->name,
+ arg_name);
+
+ snprintf(dma_mask_str, mask_str_len + 1,
+ "%s", arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_hash_offset */
+ if (strcmp(arg_name, "dma_hash_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_hash_offset_present == 0,
+ params->name, arg_name);
+ dma_hash_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_hash_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_hash_enabled = 1;
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* load_balance mode */
+ if (strcmp(arg_name, "lb") == 0) {
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ lb_present == 0,
+ params->name, arg_name);
+ lb_present = 1;
+
+ if ((strcmp(arg_value, "hash") == 0) ||
+ (strcmp(arg_value, "HASH") == 0))
+ p->lb_hash_enabled = 1;
+ else
+ PIPELINE_PARSE_ERR_INV_VAL(0,
+ params->name,
+ arg_name,
+ arg_value);
+
+ continue;
+ }
+
+ /* any other */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check correlations between arguments */
+ PIPELINE_ARG_CHECK((dma_dst_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_dst_offset\"", params->name);
+ PIPELINE_ARG_CHECK((dma_src_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_src_offset\"", params->name);
+ PIPELINE_ARG_CHECK((dma_size_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_size\"", params->name);
+ PIPELINE_ARG_CHECK((dma_hash_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_hash_offset\"", params->name);
+ PIPELINE_ARG_CHECK((p->lb_hash_enabled <= p->dma_hash_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_hash_offset\"", params->name);
+
+ if (dma_src_mask_present) {
+ uint32_t dma_size = p->dma_size;
+ int status;
+
+ PIPELINE_ARG_CHECK((strlen(dma_mask_str) ==
+ (dma_size * 2)), "Parse error in section "
+ "\"%s\": dma_src_mask should have exactly %u hex "
+ "digits", params->name, (dma_size * 2));
+
+ status = parse_hex_string(dma_mask_str, p->dma_src_mask,
+ &p->dma_size);
+
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (dma_size == p->dma_size)), params->name,
+ "dma_src_mask", dma_mask_str);
+ }
+
+ return 0;
+}
+
+
+static rte_table_hash_op_hash
+get_hash_function(struct pipeline_passthrough *p)
+{
+ switch (p->params.dma_size) {
+
+ case 8: return hash_default_key8;
+ case 16: return hash_default_key16;
+ case 24: return hash_default_key24;
+ case 32: return hash_default_key32;
+ case 40: return hash_default_key40;
+ case 48: return hash_default_key48;
+ case 56: return hash_default_key56;
+ case 64: return hash_default_key64;
+ default: return NULL;
+ }
+}
+
+static void*
+pipeline_passthrough_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_passthrough *p_pt;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0) ||
+ (params->n_ports_in < params->n_ports_out) ||
+ (params->n_ports_in % params->n_ports_out))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_passthrough));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+ p_pt = (struct pipeline_passthrough *) p;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Pass-through");
+
+ /* Parse arguments */
+ if (pipeline_passthrough_parse_args(&p_pt->params, params)) {
+ rte_free(p);
+ return NULL;
+ }
+ p_pt->f_hash = get_hash_function(p_pt);
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "PASS-THROUGH",
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+ p->n_tables = p->n_ports_in;
+
+ /* Input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = get_port_in_ah(p_pt),
+ .arg_ah = p_pt,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_stub_ops,
+ .arg_create = NULL,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ int status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Add entries to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_table_entry default_entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[
+ i / (p->n_ports_in / p->n_ports_out)]},
+ };
+
+ struct rte_pipeline_table_entry *default_entry_ptr;
+
+ int status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[i],
+ &default_entry,
+ &default_entry_ptr);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+
+ return p;
+}
+
+static int
+pipeline_passthrough_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_passthrough_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+static int
+pipeline_passthrough_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ /* Mirror the default entry mapping set up at init time */
+ *port_out = port_in / (p->n_ports_in / p->n_ports_out);
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_passthrough_be_ops = {
+ .f_init = pipeline_passthrough_init,
+ .f_free = pipeline_passthrough_free,
+ .f_run = NULL,
+ .f_timer = pipeline_passthrough_timer,
+ .f_track = pipeline_passthrough_track,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h
new file mode 100644
index 00000000..9368cec7
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h
@@ -0,0 +1,59 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
+#define __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
+
+#include "pipeline_common_be.h"
+
+#define PIPELINE_PASSTHROUGH_DMA_SIZE_MAX 64
+
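+/*
+ * Parameters for the passthrough pipeline's optional packet field copy
+ * ("DMA") and hash stages; how each field is consumed is defined by the
+ * back-end implementation in pipeline_passthrough_be.c.
+ */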
+struct pipeline_passthrough_params {
+ uint32_t dma_enabled;
+ uint32_t dma_dst_offset;
+ uint32_t dma_src_offset;
+ uint8_t dma_src_mask[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX];
+ uint32_t dma_size;
+
+ uint32_t dma_hash_enabled;
+ uint32_t dma_hash_offset;
+ uint32_t lb_hash_enabled;
+};
+
+int
+pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
+ struct pipeline_params *params);
+
+extern struct pipeline_be_ops pipeline_passthrough_be_ops;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.c b/examples/ip_pipeline/pipeline/pipeline_routing.c
new file mode 100644
index 00000000..eab89f2e
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.c
@@ -0,0 +1,2239 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_routing.h"
+
+struct app_pipeline_routing_route {
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data data;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_routing_route) node;
+};
+
+struct app_pipeline_routing_arp_entry {
+ struct pipeline_routing_arp_key key;
+ struct ether_addr macaddr;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_routing_arp_entry) node;
+};
+
+struct pipeline_routing {
+ /* Parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+
+ /* Routes */
+ TAILQ_HEAD(, app_pipeline_routing_route) routes;
+ uint32_t n_routes;
+
+ uint32_t default_route_present;
+ uint32_t default_route_port_id;
+ void *default_route_entry_ptr;
+
+ /* ARP entries */
+ TAILQ_HEAD(, app_pipeline_routing_arp_entry) arp_entries;
+ uint32_t n_arp_entries;
+
+ uint32_t default_arp_entry_present;
+ uint32_t default_arp_entry_port_id;
+ void *default_arp_entry_ptr;
+};
+
+static void *
+pipeline_routing_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline_routing *p;
+ uint32_t size;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+
+ TAILQ_INIT(&p->routes);
+ p->n_routes = 0;
+
+ TAILQ_INIT(&p->arp_entries);
+ p->n_arp_entries = 0;
+
+ return p;
+}
+
+static int
+app_pipeline_routing_free(void *pipeline)
+{
+ struct pipeline_routing *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ while (!TAILQ_EMPTY(&p->routes)) {
+ struct app_pipeline_routing_route *route;
+
+ route = TAILQ_FIRST(&p->routes);
+ TAILQ_REMOVE(&p->routes, route, node);
+ rte_free(route);
+ }
+
+ while (!TAILQ_EMPTY(&p->arp_entries)) {
+ struct app_pipeline_routing_arp_entry *arp_entry;
+
+ arp_entry = TAILQ_FIRST(&p->arp_entries);
+ TAILQ_REMOVE(&p->arp_entries, arp_entry, node);
+ rte_free(arp_entry);
+ }
+
+ rte_free(p);
+ return 0;
+}
+
+static struct app_pipeline_routing_route *
+app_pipeline_routing_find_route(struct pipeline_routing *p,
+ const struct pipeline_routing_route_key *key)
+{
+ struct app_pipeline_routing_route *it, *found;
+
+ found = NULL;
+ TAILQ_FOREACH(it, &p->routes, node) {
+ if ((key->type == it->key.type) &&
+ (key->key.ipv4.ip == it->key.key.ipv4.ip) &&
+ (key->key.ipv4.depth == it->key.key.ipv4.depth)) {
+ found = it;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static struct app_pipeline_routing_arp_entry *
+app_pipeline_routing_find_arp_entry(struct pipeline_routing *p,
+ const struct pipeline_routing_arp_key *key)
+{
+ struct app_pipeline_routing_arp_entry *it, *found;
+
+ found = NULL;
+ TAILQ_FOREACH(it, &p->arp_entries, node) {
+ if ((key->type == it->key.type) &&
+ (key->key.ipv4.port_id == it->key.key.ipv4.port_id) &&
+ (key->key.ipv4.ip == it->key.key.ipv4.ip)) {
+ found = it;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static void
+print_route(const struct app_pipeline_routing_route *route)
+{
+ if (route->key.type == PIPELINE_ROUTING_ROUTE_IPV4) {
+ const struct pipeline_routing_route_key_ipv4 *key =
+ &route->key.key.ipv4;
+
+ printf("IP Prefix = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 "/%" PRIu32
+ " => (Port = %" PRIu32,
+
+ (key->ip >> 24) & 0xFF,
+ (key->ip >> 16) & 0xFF,
+ (key->ip >> 8) & 0xFF,
+ key->ip & 0xFF,
+
+ key->depth,
+ route->data.port_id);
+
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_ARP)
+ printf(
+ ", Next Hop IP = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32,
+
+ (route->data.ethernet.ip >> 24) & 0xFF,
+ (route->data.ethernet.ip >> 16) & 0xFF,
+ (route->data.ethernet.ip >> 8) & 0xFF,
+ route->data.ethernet.ip & 0xFF);
+ else
+ printf(
+ ", Next Hop HWaddress = %02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32,
+
+ route->data.ethernet.macaddr.addr_bytes[0],
+ route->data.ethernet.macaddr.addr_bytes[1],
+ route->data.ethernet.macaddr.addr_bytes[2],
+ route->data.ethernet.macaddr.addr_bytes[3],
+ route->data.ethernet.macaddr.addr_bytes[4],
+ route->data.ethernet.macaddr.addr_bytes[5]);
+
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_QINQ)
+ printf(", QinQ SVLAN = %" PRIu32 " CVLAN = %" PRIu32,
+ route->data.l2.qinq.svlan,
+ route->data.l2.qinq.cvlan);
+
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_MPLS) {
+ uint32_t i;
+
+ printf(", MPLS labels");
+ for (i = 0; i < route->data.l2.mpls.n_labels; i++)
+ printf(" %" PRIu32,
+ route->data.l2.mpls.labels[i]);
+ }
+
+ printf(")\n");
+ }
+}
+
+static void
+print_arp_entry(const struct app_pipeline_routing_arp_entry *entry)
+{
+ printf("(Port = %" PRIu32 ", IP = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32
+ ") => HWaddress = %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",
+
+ entry->key.key.ipv4.port_id,
+ (entry->key.key.ipv4.ip >> 24) & 0xFF,
+ (entry->key.key.ipv4.ip >> 16) & 0xFF,
+ (entry->key.key.ipv4.ip >> 8) & 0xFF,
+ entry->key.key.ipv4.ip & 0xFF,
+
+ entry->macaddr.addr_bytes[0],
+ entry->macaddr.addr_bytes[1],
+ entry->macaddr.addr_bytes[2],
+ entry->macaddr.addr_bytes[3],
+ entry->macaddr.addr_bytes[4],
+ entry->macaddr.addr_bytes[5]);
+}
+
+static int
+app_pipeline_routing_route_ls(struct app_params *app, uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+ struct app_pipeline_routing_route *it;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ TAILQ_FOREACH(it, &p->routes, node)
+ print_route(it);
+
+ if (p->default_route_present)
+ printf("Default route: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_route_port_id,
+ p->default_route_entry_ptr);
+ else
+ printf("Default: DROP\n");
+
+ return 0;
+}
+
+int
+app_pipeline_routing_add_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key,
+ struct pipeline_routing_route_data *data)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_route_add_msg_req *req;
+ struct pipeline_routing_route_add_msg_rsp *rsp;
+
+ struct app_pipeline_routing_route *entry;
+
+ int new_entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (data == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ROUTE_IPV4:
+ {
+ uint32_t depth = key->key.ipv4.depth;
+ uint32_t netmask;
+
+ /* key */
+ if ((depth == 0) || (depth > 32))
+ return -1;
+
+ netmask = (~0U) << (32 - depth);
+ key->key.ipv4.ip &= netmask;
+
+ /* data */
+ if (data->port_id >= p->n_ports_out)
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find existing rule or allocate new rule */
+ entry = app_pipeline_routing_find_route(p, key);
+ new_entry = (entry == NULL);
+ if (entry == NULL) {
+ entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
+
+ if (entry == NULL)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD;
+ memcpy(&req->key, key, sizeof(*key));
+ memcpy(&req->data, data, sizeof(*data));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ /* Read response and write entry */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_entry == 0) && (rsp->key_found == 0)) ||
+ ((new_entry == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ memcpy(&entry->key, key, sizeof(*key));
+ memcpy(&entry->data, data, sizeof(*data));
+ entry->entry_ptr = rsp->entry_ptr;
+
+ /* Commit entry */
+ if (new_entry) {
+ TAILQ_INSERT_TAIL(&p->routes, entry, node);
+ p->n_routes++;
+ }
+
+ print_route(entry);
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_route_delete_msg_req *req;
+ struct pipeline_routing_route_delete_msg_rsp *rsp;
+
+ struct app_pipeline_routing_route *entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ROUTE_IPV4:
+ {
+ uint32_t depth = key->key.ipv4.depth;
+ uint32_t netmask;
+
+ /* key */
+ if ((depth == 0) || (depth > 32))
+ return -1;
+
+ netmask = (~0U) << (32 - depth);
+ key->key.ipv4.ip &= netmask;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find rule */
+ entry = app_pipeline_routing_find_route(p, key);
+ if (entry == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL;
+ memcpy(&req->key, key, sizeof(*key));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove route */
+ TAILQ_REMOVE(&p->routes, entry, node);
+ p->n_routes--;
+ rte_free(entry);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_add_default_route(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_route_add_default_msg_req *req;
+ struct pipeline_routing_route_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write route */
+ if (rsp->status || (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_route_port_id = port_id;
+ p->default_route_entry_ptr = rsp->entry_ptr;
+
+ /* Commit route */
+ p->default_route_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_default_route(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_delete_default_msg_req *req;
+ struct pipeline_routing_arp_delete_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write route */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit route */
+ p->default_route_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+static int
+app_pipeline_routing_arp_ls(struct app_params *app, uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+ struct app_pipeline_routing_arp_entry *it;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ TAILQ_FOREACH(it, &p->arp_entries, node)
+ print_arp_entry(it);
+
+ if (p->default_arp_entry_present)
+ printf("Default entry: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_arp_entry_port_id,
+ p->default_arp_entry_ptr);
+ else
+ printf("Default: DROP\n");
+
+ return 0;
+}
+
+int
+app_pipeline_routing_add_arp_entry(struct app_params *app, uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key,
+ struct ether_addr *macaddr)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_add_msg_req *req;
+ struct pipeline_routing_arp_add_msg_rsp *rsp;
+
+ struct app_pipeline_routing_arp_entry *entry;
+
+ int new_entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (macaddr == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ARP_IPV4:
+ {
+ uint32_t port_id = key->key.ipv4.port_id;
+
+ /* key */
+ if (port_id >= p->n_ports_out)
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find existing entry or allocate new */
+ entry = app_pipeline_routing_find_arp_entry(p, key);
+ new_entry = (entry == NULL);
+ if (entry == NULL) {
+ entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
+
+ if (entry == NULL)
+ return -1;
+ }
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_ADD;
+ memcpy(&req->key, key, sizeof(*key));
+ ether_addr_copy(macaddr, &req->macaddr);
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ /* Read response and write entry */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_entry == 0) && (rsp->key_found == 0)) ||
+ ((new_entry == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ memcpy(&entry->key, key, sizeof(*key));
+ ether_addr_copy(macaddr, &entry->macaddr);
+ entry->entry_ptr = rsp->entry_ptr;
+
+ /* Commit entry */
+ if (new_entry) {
+ TAILQ_INSERT_TAIL(&p->arp_entries, entry, node);
+ p->n_arp_entries++;
+ }
+
+ print_arp_entry(entry);
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_delete_msg_req *req;
+ struct pipeline_routing_arp_delete_msg_rsp *rsp;
+
+ struct app_pipeline_routing_arp_entry *entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ARP_IPV4:
+ {
+ uint32_t port_id = key->key.ipv4.port_id;
+
+ /* key */
+ if (port_id >= p->n_ports_out)
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find rule */
+ entry = app_pipeline_routing_find_arp_entry(p, key);
+ if (entry == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_DEL;
+ memcpy(&req->key, key, sizeof(*key));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove entry */
+ TAILQ_REMOVE(&p->arp_entries, entry, node);
+ p->n_arp_entries--;
+ rte_free(entry);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_add_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_add_default_msg_req *req;
+ struct pipeline_routing_arp_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write entry */
+ if (rsp->status || rsp->entry_ptr == NULL) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_arp_entry_port_id = port_id;
+ p->default_arp_entry_ptr = rsp->entry_ptr;
+
+ /* Commit entry */
+ p->default_arp_entry_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_delete_default_msg_req *req;
+ struct pipeline_routing_arp_delete_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -ETIMEDOUT;
+
+ /* Read response and write entry */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+		return -1;
+ }
+
+ /* Commit entry */
+ p->default_arp_entry_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
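+/*
+ * Parse a list of MPLS labels given either as the literal string "<void>"
+ * (empty list) or as one or more decimal labels separated by ':', e.g.
+ * "16:17:18". On success the labels are written to labels[] and their count
+ * to *n_labels, which on entry holds the maximum number of labels accepted.
+ */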
+static int
+parse_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+{
+ uint32_t n_max_labels = *n_labels, count = 0;
+
+ /* Check for void list of labels */
+ if (strcmp(string, "<void>") == 0) {
+ *n_labels = 0;
+ return 0;
+ }
+
+ /* At least one label should be present */
+ for ( ; (*string != '\0'); ) {
+ char *next;
+ int value;
+
+ if (count >= n_max_labels)
+ return -1;
+
+ if (count > 0) {
+ if (string[0] != ':')
+ return -1;
+
+ string++;
+ }
+
+ value = strtol(string, &next, 10);
+ if (next == string)
+ return -1;
+ string = next;
+
+ labels[count++] = (uint32_t) value;
+ }
+
+ *n_labels = count;
+ return 0;
+}
+
+/*
+ * route add (mpls = no, qinq = no, arp = no)
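+ *
+ * Example (hypothetical pipeline/port values):
+ *    p 0 route add 10.0.0.0 24 port 1 ether a0:b0:c0:d0:e0:f0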
+ */
+
+struct cmd_route_add1_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t ether_string;
+ struct ether_addr macaddr;
+};
+
+static void
+cmd_route_add1_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_add1_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ int status;
+
+ /* Create route */
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.depth = params->depth;
+
+ route_data.flags = 0;
+ route_data.port_id = params->port;
+ route_data.ethernet.macaddr = params->macaddr;
+
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_add1_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_add1_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add1_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, route_string,
+ "route");
+
+static cmdline_parse_token_string_t cmd_route_add1_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, add_string,
+ "add");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add1_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add1_result, ip);
+
+static cmdline_parse_token_num_t cmd_route_add1_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, depth, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add1_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, port_string,
+ "port");
+
+static cmdline_parse_token_num_t cmd_route_add1_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, port, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add1_ether_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, ether_string,
+ "ether");
+
+static cmdline_parse_token_etheraddr_t cmd_route_add1_macaddr =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add1_result, macaddr);
+
+static cmdline_parse_inst_t cmd_route_add1 = {
+ .f = cmd_route_add1_parsed,
+ .data = NULL,
+ .help_str = "Route add (mpls = no, qinq = no, arp = no)",
+ .tokens = {
+ (void *)&cmd_route_add1_p_string,
+ (void *)&cmd_route_add1_p,
+ (void *)&cmd_route_add1_route_string,
+ (void *)&cmd_route_add1_add_string,
+ (void *)&cmd_route_add1_ip,
+ (void *)&cmd_route_add1_depth,
+ (void *)&cmd_route_add1_port_string,
+ (void *)&cmd_route_add1_port,
+ (void *)&cmd_route_add1_ether_string,
+ (void *)&cmd_route_add1_macaddr,
+ NULL,
+ },
+};
+
+/*
+ * route add (mpls = no, qinq = no, arp = yes)
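+ *
+ * Example (hypothetical values):
+ *    p 0 route add 10.0.0.0 24 port 1 ether 192.168.0.1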
+ */
+
+struct cmd_route_add2_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t ether_string;
+ cmdline_ipaddr_t nh_ip;
+};
+
+static void
+cmd_route_add2_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_add2_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ int status;
+
+ /* Create route */
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.depth = params->depth;
+
+ route_data.flags = PIPELINE_ROUTING_ROUTE_ARP;
+ route_data.port_id = params->port;
+ route_data.ethernet.ip =
+ rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
+
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_add2_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_add2_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add2_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, route_string,
+ "route");
+
+static cmdline_parse_token_string_t cmd_route_add2_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, add_string,
+ "add");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add2_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add2_result, ip);
+
+static cmdline_parse_token_num_t cmd_route_add2_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, depth, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add2_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, port_string,
+ "port");
+
+static cmdline_parse_token_num_t cmd_route_add2_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, port, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add2_ether_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, ether_string,
+ "ether");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add2_nh_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add2_result, nh_ip);
+
+static cmdline_parse_inst_t cmd_route_add2 = {
+ .f = cmd_route_add2_parsed,
+ .data = NULL,
+ .help_str = "Route add (mpls = no, qinq = no, arp = yes)",
+ .tokens = {
+ (void *)&cmd_route_add2_p_string,
+ (void *)&cmd_route_add2_p,
+ (void *)&cmd_route_add2_route_string,
+ (void *)&cmd_route_add2_add_string,
+ (void *)&cmd_route_add2_ip,
+ (void *)&cmd_route_add2_depth,
+ (void *)&cmd_route_add2_port_string,
+ (void *)&cmd_route_add2_port,
+ (void *)&cmd_route_add2_ether_string,
+ (void *)&cmd_route_add2_nh_ip,
+ NULL,
+ },
+};
+
+/*
+ * route add (mpls = no, qinq = yes, arp = no)
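+ *
+ * Example (hypothetical values):
+ *    p 0 route add 10.0.0.0 24 port 1 ether a0:b0:c0:d0:e0:f0 qinq 100 200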
+ */
+
+struct cmd_route_add3_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t ether_string;
+ struct ether_addr macaddr;
+ cmdline_fixed_string_t qinq_string;
+ uint32_t svlan;
+ uint32_t cvlan;
+};
+
+static void
+cmd_route_add3_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_add3_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ int status;
+
+ /* Create route */
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.depth = params->depth;
+
+ route_data.flags = PIPELINE_ROUTING_ROUTE_QINQ;
+ route_data.port_id = params->port;
+ route_data.ethernet.macaddr = params->macaddr;
+ route_data.l2.qinq.svlan = params->svlan;
+ route_data.l2.qinq.cvlan = params->cvlan;
+
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_add3_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_add3_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add3_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, route_string,
+ "route");
+
+static cmdline_parse_token_string_t cmd_route_add3_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, add_string,
+ "add");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add3_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add3_result, ip);
+
+static cmdline_parse_token_num_t cmd_route_add3_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, depth, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add3_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, port_string,
+ "port");
+
+static cmdline_parse_token_num_t cmd_route_add3_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, port, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add3_ether_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, ether_string,
+ "ether");
+
+static cmdline_parse_token_etheraddr_t cmd_route_add3_macaddr =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add3_result, macaddr);
+
+static cmdline_parse_token_string_t cmd_route_add3_qinq_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, qinq_string,
+ "qinq");
+
+static cmdline_parse_token_num_t cmd_route_add3_svlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, svlan, UINT32);
+
+static cmdline_parse_token_num_t cmd_route_add3_cvlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, cvlan, UINT32);
+
+static cmdline_parse_inst_t cmd_route_add3 = {
+ .f = cmd_route_add3_parsed,
+ .data = NULL,
+	.help_str = "Route add (mpls = no, qinq = yes, arp = no)",
+ .tokens = {
+ (void *)&cmd_route_add3_p_string,
+ (void *)&cmd_route_add3_p,
+ (void *)&cmd_route_add3_route_string,
+ (void *)&cmd_route_add3_add_string,
+ (void *)&cmd_route_add3_ip,
+ (void *)&cmd_route_add3_depth,
+ (void *)&cmd_route_add3_port_string,
+ (void *)&cmd_route_add3_port,
+ (void *)&cmd_route_add3_ether_string,
+ (void *)&cmd_route_add3_macaddr,
+ (void *)&cmd_route_add3_qinq_string,
+ (void *)&cmd_route_add3_svlan,
+ (void *)&cmd_route_add3_cvlan,
+ NULL,
+ },
+};
+
+/*
+ * route add (mpls = no, qinq = yes, arp = yes)
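+ *
+ * Example (hypothetical values):
+ *    p 0 route add 10.0.0.0 24 port 1 ether 192.168.0.1 qinq 100 200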
+ */
+
+struct cmd_route_add4_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t ether_string;
+ cmdline_ipaddr_t nh_ip;
+ cmdline_fixed_string_t qinq_string;
+ uint32_t svlan;
+ uint32_t cvlan;
+};
+
+static void
+cmd_route_add4_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_add4_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ int status;
+
+ /* Create route */
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.depth = params->depth;
+
+ route_data.flags = PIPELINE_ROUTING_ROUTE_QINQ |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ route_data.port_id = params->port;
+ route_data.ethernet.ip =
+ rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
+ route_data.l2.qinq.svlan = params->svlan;
+ route_data.l2.qinq.cvlan = params->cvlan;
+
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_add4_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_add4_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add4_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, route_string,
+ "route");
+
+static cmdline_parse_token_string_t cmd_route_add4_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, add_string,
+ "add");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add4_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add4_result, ip);
+
+static cmdline_parse_token_num_t cmd_route_add4_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, depth, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add4_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, port_string,
+ "port");
+
+static cmdline_parse_token_num_t cmd_route_add4_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, port, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add4_ether_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, ether_string,
+ "ether");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add4_nh_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add4_result, nh_ip);
+
+static cmdline_parse_token_string_t cmd_route_add4_qinq_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, qinq_string,
+ "qinq");
+
+static cmdline_parse_token_num_t cmd_route_add4_svlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, svlan, UINT32);
+
+static cmdline_parse_token_num_t cmd_route_add4_cvlan =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, cvlan, UINT32);
+
+static cmdline_parse_inst_t cmd_route_add4 = {
+ .f = cmd_route_add4_parsed,
+ .data = NULL,
+	.help_str = "Route add (mpls = no, qinq = yes, arp = yes)",
+ .tokens = {
+ (void *)&cmd_route_add4_p_string,
+ (void *)&cmd_route_add4_p,
+ (void *)&cmd_route_add4_route_string,
+ (void *)&cmd_route_add4_add_string,
+ (void *)&cmd_route_add4_ip,
+ (void *)&cmd_route_add4_depth,
+ (void *)&cmd_route_add4_port_string,
+ (void *)&cmd_route_add4_port,
+ (void *)&cmd_route_add4_ether_string,
+ (void *)&cmd_route_add4_nh_ip,
+ (void *)&cmd_route_add4_qinq_string,
+ (void *)&cmd_route_add4_svlan,
+ (void *)&cmd_route_add4_cvlan,
+ NULL,
+ },
+};
+
+/*
+ * route add (mpls = yes, qinq = no, arp = no)
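+ *
+ * Example (hypothetical values):
+ *    p 0 route add 10.0.0.0 24 port 1 ether a0:b0:c0:d0:e0:f0 mpls 16:17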
+ */
+
+struct cmd_route_add5_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t ether_string;
+ struct ether_addr macaddr;
+ cmdline_fixed_string_t mpls_string;
+ cmdline_fixed_string_t mpls_labels;
+};
+
+static void
+cmd_route_add5_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_add5_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
+ uint32_t n_labels = RTE_DIM(mpls_labels);
+ uint32_t i;
+ int status;
+
+ /* Parse MPLS labels */
+ status = parse_labels(params->mpls_labels, mpls_labels, &n_labels);
+ if (status) {
+ printf("MPLS labels parse error\n");
+ return;
+ }
+
+ /* Create route */
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.depth = params->depth;
+
+ route_data.flags = PIPELINE_ROUTING_ROUTE_MPLS;
+ route_data.port_id = params->port;
+ route_data.ethernet.macaddr = params->macaddr;
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_add5_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_add5_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add5_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, route_string,
+ "route");
+
+static cmdline_parse_token_string_t cmd_route_add5_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, add_string,
+ "add");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add5_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add5_result, ip);
+
+static cmdline_parse_token_num_t cmd_route_add5_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, depth, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add5_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, port_string,
+ "port");
+
+static cmdline_parse_token_num_t cmd_route_add5_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, port, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add5_ether_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, ether_string,
+ "ether");
+
+static cmdline_parse_token_etheraddr_t cmd_route_add5_macaddr =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add5_result, macaddr);
+
+static cmdline_parse_token_string_t cmd_route_add5_mpls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, mpls_string,
+ "mpls");
+
+static cmdline_parse_token_string_t cmd_route_add5_mpls_labels =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, mpls_labels,
+ NULL);
+
+static cmdline_parse_inst_t cmd_route_add5 = {
+ .f = cmd_route_add5_parsed,
+ .data = NULL,
+	.help_str = "Route add (mpls = yes, qinq = no, arp = no)",
+ .tokens = {
+ (void *)&cmd_route_add5_p_string,
+ (void *)&cmd_route_add5_p,
+ (void *)&cmd_route_add5_route_string,
+ (void *)&cmd_route_add5_add_string,
+ (void *)&cmd_route_add5_ip,
+ (void *)&cmd_route_add5_depth,
+ (void *)&cmd_route_add5_port_string,
+ (void *)&cmd_route_add5_port,
+ (void *)&cmd_route_add5_ether_string,
+ (void *)&cmd_route_add5_macaddr,
+ (void *)&cmd_route_add5_mpls_string,
+ (void *)&cmd_route_add5_mpls_labels,
+ NULL,
+ },
+};
+
+/*
+ * route add (mpls = yes, qinq = no, arp = yes)
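+ *
+ * Example (hypothetical values):
+ *    p 0 route add 10.0.0.0 24 port 1 ether 192.168.0.1 mpls 16:17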
+ */
+
+struct cmd_route_add6_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+ cmdline_fixed_string_t port_string;
+ uint32_t port;
+ cmdline_fixed_string_t ether_string;
+ cmdline_ipaddr_t nh_ip;
+ cmdline_fixed_string_t mpls_string;
+ cmdline_fixed_string_t mpls_labels;
+};
+
+static void
+cmd_route_add6_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_add6_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
+ uint32_t n_labels = RTE_DIM(mpls_labels);
+ uint32_t i;
+ int status;
+
+ /* Parse MPLS labels */
+ status = parse_labels(params->mpls_labels, mpls_labels, &n_labels);
+ if (status) {
+ printf("MPLS labels parse error\n");
+ return;
+ }
+
+ /* Create route */
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.depth = params->depth;
+
+ route_data.flags = PIPELINE_ROUTING_ROUTE_MPLS |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ route_data.port_id = params->port;
+ route_data.ethernet.ip =
+ rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_add6_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_add6_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add6_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, route_string,
+ "route");
+
+static cmdline_parse_token_string_t cmd_route_add6_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, add_string,
+ "add");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add6_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add6_result, ip);
+
+static cmdline_parse_token_num_t cmd_route_add6_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, depth, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add6_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, port_string,
+ "port");
+
+static cmdline_parse_token_num_t cmd_route_add6_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, port, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add6_ether_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, ether_string,
+ "ether");
+
+static cmdline_parse_token_ipaddr_t cmd_route_add6_nh_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_add6_result, nh_ip);
+
+static cmdline_parse_token_string_t cmd_route_add6_mpls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, mpls_string,
+ "mpls");
+
+static cmdline_parse_token_string_t cmd_route_add6_mpls_labels =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, mpls_labels,
+ NULL);
+
+static cmdline_parse_inst_t cmd_route_add6 = {
+ .f = cmd_route_add6_parsed,
+ .data = NULL,
+	.help_str = "Route add (mpls = yes, qinq = no, arp = yes)",
+ .tokens = {
+ (void *)&cmd_route_add6_p_string,
+ (void *)&cmd_route_add6_p,
+ (void *)&cmd_route_add6_route_string,
+ (void *)&cmd_route_add6_add_string,
+ (void *)&cmd_route_add6_ip,
+ (void *)&cmd_route_add6_depth,
+ (void *)&cmd_route_add6_port_string,
+ (void *)&cmd_route_add6_port,
+ (void *)&cmd_route_add6_ether_string,
+ (void *)&cmd_route_add6_nh_ip,
+ (void *)&cmd_route_add6_mpls_string,
+ (void *)&cmd_route_add6_mpls_labels,
+ NULL,
+ },
+};
+
+/*
+ * route del
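+ *
+ * Example (hypothetical values): p 0 route del 10.0.0.0 24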
+ */
+
+struct cmd_route_del_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+};
+
+static void
+cmd_route_del_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_del_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing_route_key key;
+
+ int status;
+
+ /* Create route */
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.depth = params->depth;
+
+ status = app_pipeline_routing_delete_route(app, params->p, &key);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_del_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_del_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_del_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_del_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, route_string,
+ "route");
+
+static cmdline_parse_token_string_t cmd_route_del_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, del_string,
+ "del");
+
+static cmdline_parse_token_ipaddr_t cmd_route_del_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_route_del_result, ip);
+
+static cmdline_parse_token_num_t cmd_route_del_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_del_result, depth, UINT32);
+
+static cmdline_parse_inst_t cmd_route_del = {
+ .f = cmd_route_del_parsed,
+ .data = NULL,
+ .help_str = "Route delete",
+ .tokens = {
+ (void *)&cmd_route_del_p_string,
+ (void *)&cmd_route_del_p,
+ (void *)&cmd_route_del_route_string,
+ (void *)&cmd_route_del_del_string,
+ (void *)&cmd_route_del_ip,
+ (void *)&cmd_route_del_depth,
+ NULL,
+ },
+};
+
+/*
+ * route add default
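+ *
+ * Example (hypothetical values): p 0 route add default 1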
+ */
+
+struct cmd_route_add_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t default_string;
+ uint32_t port;
+};
+
+static void
+cmd_route_add_default_parsed(
+ void *parsed_result,
+	__rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_add_default_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_routing_add_default_route(app, params->p,
+ params->port);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_add_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_add_default_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add_default_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_add_default_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
+ route_string, "route");
+
+static cmdline_parse_token_string_t cmd_route_add_default_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
+ add_string, "add");
+
+static cmdline_parse_token_string_t cmd_route_add_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
+ default_string, "default");
+
+static cmdline_parse_token_num_t cmd_route_add_default_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_add_default_result,
+ port, UINT32);
+
+static cmdline_parse_inst_t cmd_route_add_default = {
+ .f = cmd_route_add_default_parsed,
+ .data = NULL,
+ .help_str = "Route default set",
+ .tokens = {
+ (void *)&cmd_route_add_default_p_string,
+ (void *)&cmd_route_add_default_p,
+ (void *)&cmd_route_add_default_route_string,
+ (void *)&cmd_route_add_default_add_string,
+ (void *)&cmd_route_add_default_default_string,
+ (void *)&cmd_route_add_default_port,
+ NULL,
+ },
+};
+
+/*
+ * route del default
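+ *
+ * Example (hypothetical pipeline id): p 0 route del default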
+ */
+
+struct cmd_route_del_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t default_string;
+};
+
+static void
+cmd_route_del_default_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_del_default_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_routing_delete_default_route(app, params->p);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_del_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_route_del_default_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_del_default_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_del_default_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
+ route_string, "route");
+
+static cmdline_parse_token_string_t cmd_route_del_default_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
+ del_string, "del");
+
+static cmdline_parse_token_string_t cmd_route_del_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
+ default_string, "default");
+
+static cmdline_parse_inst_t cmd_route_del_default = {
+ .f = cmd_route_del_default_parsed,
+ .data = NULL,
+ .help_str = "Route default clear",
+ .tokens = {
+ (void *)&cmd_route_del_default_p_string,
+ (void *)&cmd_route_del_default_p,
+ (void *)&cmd_route_del_default_route_string,
+ (void *)&cmd_route_del_default_del_string,
+ (void *)&cmd_route_del_default_default_string,
+ NULL,
+ },
+};
+
+/*
+ * route ls
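+ *
+ * Example (hypothetical pipeline id): p 0 route ls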
+ */
+
+struct cmd_route_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_route_ls_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_ls_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_routing_route_ls(app, params->p);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_route_ls_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_route_ls_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_ls_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_ls_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result,
+ route_string, "route");
+
+static cmdline_parse_token_string_t cmd_route_ls_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result, ls_string,
+ "ls");
+
+static cmdline_parse_inst_t cmd_route_ls = {
+ .f = cmd_route_ls_parsed,
+ .data = NULL,
+ .help_str = "Route list",
+ .tokens = {
+ (void *)&cmd_route_ls_p_string,
+ (void *)&cmd_route_ls_p,
+ (void *)&cmd_route_ls_route_string,
+ (void *)&cmd_route_ls_ls_string,
+ NULL,
+ },
+};
+
+/*
+ * arp add
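+ *
+ * Example (hypothetical values): p 0 arp add 1 192.168.0.1 a0:b0:c0:d0:e0:f0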
+ */
+
+struct cmd_arp_add_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ cmdline_fixed_string_t add_string;
+ uint32_t port_id;
+ cmdline_ipaddr_t ip;
+	struct ether_addr macaddr;
+};
+
+static void
+cmd_arp_add_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_arp_add_result *params = parsed_result;
+ struct app_params *app = data;
+
+ struct pipeline_routing_arp_key key;
+ int status;
+
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.port_id = params->port_id;
+ key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
+
+ status = app_pipeline_routing_add_arp_entry(app,
+ params->p,
+ &key,
+ &params->macaddr);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_add_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_add_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_add_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, arp_string, "arp");
+
+static cmdline_parse_token_string_t cmd_arp_add_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, add_string, "add");
+
+static cmdline_parse_token_num_t cmd_arp_add_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, port_id, UINT32);
+
+static cmdline_parse_token_ipaddr_t cmd_arp_add_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_arp_add_result, ip);
+
+static cmdline_parse_token_etheraddr_t cmd_arp_add_macaddr =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_arp_add_result, macaddr);
+
+static cmdline_parse_inst_t cmd_arp_add = {
+ .f = cmd_arp_add_parsed,
+ .data = NULL,
+ .help_str = "ARP add",
+ .tokens = {
+ (void *)&cmd_arp_add_p_string,
+ (void *)&cmd_arp_add_p,
+ (void *)&cmd_arp_add_arp_string,
+ (void *)&cmd_arp_add_add_string,
+ (void *)&cmd_arp_add_port_id,
+ (void *)&cmd_arp_add_ip,
+ (void *)&cmd_arp_add_macaddr,
+ NULL,
+ },
+};
+
+/*
+ * arp del
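+ *
+ * Example (hypothetical values): p 0 arp del 1 192.168.0.1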
+ */
+
+struct cmd_arp_del_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ cmdline_fixed_string_t del_string;
+ uint32_t port_id;
+ cmdline_ipaddr_t ip;
+};
+
+static void
+cmd_arp_del_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_arp_del_result *params = parsed_result;
+ struct app_params *app = data;
+
+ struct pipeline_routing_arp_key key;
+ int status;
+
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.port_id = params->port_id;
+
+ status = app_pipeline_routing_delete_arp_entry(app, params->p, &key);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_del_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_del_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_del_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "arp");
+
+static cmdline_parse_token_string_t cmd_arp_del_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, del_string, "del");
+
+static cmdline_parse_token_num_t cmd_arp_del_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, port_id, UINT32);
+
+static cmdline_parse_token_ipaddr_t cmd_arp_del_ip =
+ TOKEN_IPV4_INITIALIZER(struct cmd_arp_del_result, ip);
+
+static cmdline_parse_inst_t cmd_arp_del = {
+ .f = cmd_arp_del_parsed,
+ .data = NULL,
+ .help_str = "ARP delete",
+ .tokens = {
+ (void *)&cmd_arp_del_p_string,
+ (void *)&cmd_arp_del_p,
+ (void *)&cmd_arp_del_arp_string,
+ (void *)&cmd_arp_del_del_string,
+ (void *)&cmd_arp_del_port_id,
+ (void *)&cmd_arp_del_ip,
+ NULL,
+ },
+};
+
+/*
+ * arp add default
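+ *
+ * Example (hypothetical values): p 0 arp add default 1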
+ */
+
+struct cmd_arp_add_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_fixed_string_t default_string;
+ uint32_t port_id;
+};
+
+static void
+cmd_arp_add_default_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_arp_add_default_result *params = parsed_result;
+ struct app_params *app = data;
+
+ int status;
+
+ status = app_pipeline_routing_add_default_arp_entry(app,
+ params->p,
+ params->port_id);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_add_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_add_default_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_add_default_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_add_default_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, arp_string,
+ "arp");
+
+static cmdline_parse_token_string_t cmd_arp_add_default_add_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, add_string,
+ "add");
+
+static cmdline_parse_token_string_t cmd_arp_add_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result,
+ default_string, "default");
+
+static cmdline_parse_token_num_t cmd_arp_add_default_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_add_default_result, port_id,
+ UINT32);
+
+static cmdline_parse_inst_t cmd_arp_add_default = {
+ .f = cmd_arp_add_default_parsed,
+ .data = NULL,
+ .help_str = "ARP add default",
+ .tokens = {
+ (void *)&cmd_arp_add_default_p_string,
+ (void *)&cmd_arp_add_default_p,
+ (void *)&cmd_arp_add_default_arp_string,
+ (void *)&cmd_arp_add_default_add_string,
+ (void *)&cmd_arp_add_default_default_string,
+ (void *)&cmd_arp_add_default_port_id,
+ NULL,
+ },
+};
+
+/*
+ * arp del default
+ */
+
+struct cmd_arp_del_default_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_fixed_string_t default_string;
+};
+
+static void
+cmd_arp_del_default_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_arp_del_default_result *params = parsed_result;
+ struct app_params *app = data;
+
+ int status;
+
+ status = app_pipeline_routing_delete_default_arp_entry(app, params->p);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_del_default_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_del_default_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_del_default_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_del_default_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, arp_string,
+ "arp");
+
+static cmdline_parse_token_string_t cmd_arp_del_default_del_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, del_string,
+ "del");
+
+static cmdline_parse_token_string_t cmd_arp_del_default_default_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result,
+ default_string, "default");
+
+static cmdline_parse_inst_t cmd_arp_del_default = {
+ .f = cmd_arp_del_default_parsed,
+ .data = NULL,
+ .help_str = "ARP delete default",
+ .tokens = {
+ (void *)&cmd_arp_del_default_p_string,
+ (void *)&cmd_arp_del_default_p,
+ (void *)&cmd_arp_del_default_arp_string,
+ (void *)&cmd_arp_del_default_del_string,
+ (void *)&cmd_arp_del_default_default_string,
+ NULL,
+ },
+};
+
+/*
+ * arp ls
+ */
+
+struct cmd_arp_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_arp_ls_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_arp_ls_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_routing *p;
+
+ p = app_pipeline_data_fe(app, params->p, &pipeline_routing);
+ if (p == NULL)
+ return;
+
+ app_pipeline_routing_arp_ls(app, params->p);
+}
+
+static cmdline_parse_token_string_t cmd_arp_ls_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_ls_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_ls_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_ls_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, arp_string,
+ "arp");
+
+static cmdline_parse_token_string_t cmd_arp_ls_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, ls_string,
+ "ls");
+
+static cmdline_parse_inst_t cmd_arp_ls = {
+ .f = cmd_arp_ls_parsed,
+ .data = NULL,
+ .help_str = "ARP list",
+ .tokens = {
+ (void *)&cmd_arp_ls_p_string,
+ (void *)&cmd_arp_ls_p,
+ (void *)&cmd_arp_ls_arp_string,
+ (void *)&cmd_arp_ls_ls_string,
+ NULL,
+ },
+};
+
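+/*
+ * CLI command list for the routing pipeline front-end. The array is
+ * NULL-terminated and is exported to the application through
+ * pipeline_routing_fe_ops.cmds below.
+ */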
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *)&cmd_route_add1,
+ (cmdline_parse_inst_t *)&cmd_route_add2,
+ (cmdline_parse_inst_t *)&cmd_route_add3,
+ (cmdline_parse_inst_t *)&cmd_route_add4,
+ (cmdline_parse_inst_t *)&cmd_route_add5,
+ (cmdline_parse_inst_t *)&cmd_route_add6,
+ (cmdline_parse_inst_t *)&cmd_route_del,
+ (cmdline_parse_inst_t *)&cmd_route_add_default,
+ (cmdline_parse_inst_t *)&cmd_route_del_default,
+ (cmdline_parse_inst_t *)&cmd_route_ls,
+ (cmdline_parse_inst_t *)&cmd_arp_add,
+ (cmdline_parse_inst_t *)&cmd_arp_del,
+ (cmdline_parse_inst_t *)&cmd_arp_add_default,
+ (cmdline_parse_inst_t *)&cmd_arp_del_default,
+ (cmdline_parse_inst_t *)&cmd_arp_ls,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_routing_fe_ops = {
+ .f_init = pipeline_routing_init,
+ .f_free = app_pipeline_routing_free,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_routing = {
+ .name = "ROUTING",
+ .be_ops = &pipeline_routing_be_ops,
+ .fe_ops = &pipeline_routing_fe_ops,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.h b/examples/ip_pipeline/pipeline/pipeline_routing.h
new file mode 100644
index 00000000..fa41642b
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_ROUTING_H__
+#define __INCLUDE_PIPELINE_ROUTING_H__
+
+#include "pipeline.h"
+#include "pipeline_routing_be.h"
+
+/*
+ * Route
+ */
+
+int
+app_pipeline_routing_add_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key,
+ struct pipeline_routing_route_data *data);
+
+int
+app_pipeline_routing_delete_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key);
+
+int
+app_pipeline_routing_add_default_route(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_routing_delete_default_route(struct app_params *app,
+ uint32_t pipeline_id);
+
+/*
+ * ARP
+ */
+
+int
+app_pipeline_routing_add_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key,
+ struct ether_addr *macaddr);
+
+int
+app_pipeline_routing_delete_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key);
+
+int
+app_pipeline_routing_add_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id);
+
+/*
+ * Pipeline type
+ */
+extern struct pipeline_type pipeline_routing;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
new file mode 100644
index 00000000..bc5bf7a5
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
@@ -0,0 +1,1970 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_routing_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+
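+/*
+ * Build a 32-bit MPLS label stack entry: label in bits 31:12, EXP in
+ * bits 11:9, bottom-of-stack flag in bit 8, TTL in bits 7:0.
+ */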
+#define MPLS_LABEL(label, exp, s, ttl) \
+ (((((uint64_t) (label)) & 0xFFFFFLLU) << 12) | \
+ ((((uint64_t) (exp)) & 0x7LLU) << 9) | \
+ ((((uint64_t) (s)) & 0x1LLU) << 8) | \
+ (((uint64_t) (ttl)) & 0xFFLU))
+
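+/*
+ * Pack the rte_sched hierarchy fields into the 64-bit mbuf sched word:
+ * queue in bits 1:0, traffic class in bits 3:2, color in bits 5:4,
+ * subport in bits 31:16, pipe in bits 63:32.
+ */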
+#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, \
+ traffic_class, queue, color) \
+ ((((uint64_t) (queue)) & 0x3) | \
+ ((((uint64_t) (traffic_class)) & 0x3) << 2) | \
+ ((((uint64_t) (color)) & 0x3) << 4) | \
+ ((((uint64_t) (subport)) & 0xFFFF) << 16) | \
+ ((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
+
+
+#define MAC_SRC_DEFAULT 0x112233445566ULL
+
+#ifndef PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s
+#define PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s 256
+#endif
+
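+/*
+ * Back-end run-time state: the generic pipeline context, the parameters
+ * parsed from the configuration file and the table of routing-specific
+ * message handlers.
+ */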
+struct pipeline_routing {
+ struct pipeline p;
+ struct pipeline_routing_params params;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_ROUTING_MSG_REQS];
+} __rte_cache_aligned;
+
+/*
+ * Message handlers
+ */
+static void *
+pipeline_routing_msg_req_custom_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_routing_msg_req_custom_handler,
+};
+
+static void *
+pipeline_routing_msg_req_route_add_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_route_del_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_route_add_default_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_route_del_default_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_add_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_del_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_add_default_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p,
+ void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD] =
+ pipeline_routing_msg_req_route_add_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL] =
+ pipeline_routing_msg_req_route_del_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT] =
+ pipeline_routing_msg_req_route_add_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT] =
+ pipeline_routing_msg_req_route_del_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_ADD] =
+ pipeline_routing_msg_req_arp_add_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_DEL] =
+ pipeline_routing_msg_req_arp_del_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT] =
+ pipeline_routing_msg_req_arp_add_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT] =
+ pipeline_routing_msg_req_arp_del_default_handler,
+};
+
+/*
+ * Routing table
+ */
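+/*
+ * Action data attached to each LPM rule. The precomputed L2 header is
+ * stored as up to four 8-byte slabs, together with the mbuf offsets at
+ * which the table action handler writes them at run-time.
+ */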
+struct routing_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint32_t flags;
+ uint32_t port_id; /* Output port ID */
+ uint32_t ip; /* Next hop IP address (only valid for remote routes) */
+
+ /* ether_l2 */
+ uint16_t data_offset;
+ uint16_t ether_l2_length;
+ uint64_t slab[4];
+ uint16_t slab_offset[4];
+};
+
+struct layout {
+ uint16_t a;
+ uint32_t b;
+ uint16_t c;
+} __attribute__((__packed__));
+
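+/*
+ * Copy the last 6 bytes of an 8-byte slab (the destination MAC address)
+ * to the target location, leaving the first 2 bytes at the target
+ * untouched. The packed 2+4+2 layout above makes this a pair of stores.
+ */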
+#define MACADDR_DST_WRITE(slab_ptr, slab) \
+{ \
+ struct layout *dst = (struct layout *) (slab_ptr); \
+ struct layout *src = (struct layout *) &(slab); \
+ \
+ dst->b = src->b; \
+ dst->c = src->c; \
+}
+
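+/*
+ * Per-packet routing action. The arp/qinq/qinq_sched/mpls/mpls_color_mark
+ * arguments are compile-time constants supplied by the wrapper macros
+ * below, so the unused branches are removed when this function is inlined.
+ */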
+static inline __attribute__((always_inline)) void
+pkt_work_routing(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ void *arg,
+ int arp,
+ int qinq,
+ int qinq_sched,
+ int mpls,
+ int mpls_color_mark)
+{
+ struct pipeline_routing *p_rt = arg;
+
+ struct routing_table_entry *entry =
+ (struct routing_table_entry *) table_entry;
+
+ struct ipv4_hdr *ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, p_rt->params.ip_hdr_offset);
+
+ enum rte_meter_color pkt_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkt, p_rt->params.color_offset);
+
+ struct pipeline_routing_arp_key_ipv4 *arp_key =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, p_rt->params.arp_key_offset);
+
+ uint64_t *slab0_ptr, *slab1_ptr, *slab2_ptr, *slab3_ptr, sched;
+ uint32_t ip_da, nh_ip, port_id;
+ uint16_t total_length, data_offset, ether_l2_length;
+
+ /* Read */
+ total_length = rte_bswap16(ip->total_length);
+ ip_da = ip->dst_addr;
+ data_offset = entry->data_offset;
+ ether_l2_length = entry->ether_l2_length;
+ slab0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[0]);
+ slab1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[1]);
+ slab2_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[2]);
+ slab3_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[3]);
+
+ if (arp) {
+ port_id = entry->port_id;
+ nh_ip = entry->ip;
+ if (entry->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip = ip_da;
+ }
+
+ /* Compute */
+ total_length += ether_l2_length;
+
+ if (qinq && qinq_sched) {
+ uint32_t dscp = ip->type_of_service >> 2;
+ uint32_t svlan, cvlan, tc, tc_q;
+
+ if (qinq_sched == 1) {
+ uint64_t slab_qinq = rte_bswap64(entry->slab[0]);
+
+ svlan = (slab_qinq >> 48) & 0xFFF;
+ cvlan = (slab_qinq >> 16) & 0xFFF;
+ tc = (dscp >> 2) & 0x3;
+ tc_q = dscp & 0x3;
+ } else {
+ uint32_t ip_src = rte_bswap32(ip->src_addr);
+
+ svlan = 0;
+ cvlan = (ip_src >> 16) & 0xFFF;
+ tc = (ip_src >> 2) & 0x3;
+ tc_q = ip_src & 0x3;
+ }
+ sched = RTE_SCHED_PORT_HIERARCHY(svlan,
+ cvlan,
+ tc,
+ tc_q,
+ e_RTE_METER_GREEN);
+ }
+
+ /* Write */
+ pkt->data_off = data_offset;
+ pkt->data_len = total_length;
+ pkt->pkt_len = total_length;
+
+ if ((qinq == 0) && (mpls == 0)) {
+ *slab0_ptr = entry->slab[0];
+
+ if (arp == 0)
+ MACADDR_DST_WRITE(slab1_ptr, entry->slab[1]);
+ }
+
+ if (qinq) {
+ *slab0_ptr = entry->slab[0];
+ *slab1_ptr = entry->slab[1];
+
+ if (arp == 0)
+ MACADDR_DST_WRITE(slab2_ptr, entry->slab[2]);
+
+ if (qinq_sched) {
+ pkt->hash.sched.lo = sched & 0xFFFFFFFF;
+ pkt->hash.sched.hi = sched >> 32;
+ }
+ }
+
+ if (mpls) {
+ if (mpls_color_mark) {
+ uint64_t mpls_exp = rte_bswap64(
+ (MPLS_LABEL(0, pkt_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt_color, 0, 0));
+
+ *slab0_ptr = entry->slab[0] | mpls_exp;
+ *slab1_ptr = entry->slab[1] | mpls_exp;
+ *slab2_ptr = entry->slab[2];
+ } else {
+ *slab0_ptr = entry->slab[0];
+ *slab1_ptr = entry->slab[1];
+ *slab2_ptr = entry->slab[2];
+ }
+
+ if (arp == 0)
+ MACADDR_DST_WRITE(slab3_ptr, entry->slab[3]);
+ }
+
+ if (arp) {
+ arp_key->port_id = port_id;
+ arp_key->ip = nh_ip;
+ }
+}
+
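+/*
+ * Four-packet variant of pkt_work_routing(), processing a burst of four
+ * table hits at once; the per-packet logic is identical.
+ */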
+static inline __attribute__((always_inline)) void
+pkt4_work_routing(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ void *arg,
+ int arp,
+ int qinq,
+ int qinq_sched,
+ int mpls,
+ int mpls_color_mark)
+{
+ struct pipeline_routing *p_rt = arg;
+
+ struct routing_table_entry *entry0 =
+ (struct routing_table_entry *) table_entries[0];
+ struct routing_table_entry *entry1 =
+ (struct routing_table_entry *) table_entries[1];
+ struct routing_table_entry *entry2 =
+ (struct routing_table_entry *) table_entries[2];
+ struct routing_table_entry *entry3 =
+ (struct routing_table_entry *) table_entries[3];
+
+ struct ipv4_hdr *ip0 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[0],
+ p_rt->params.ip_hdr_offset);
+ struct ipv4_hdr *ip1 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[1],
+ p_rt->params.ip_hdr_offset);
+ struct ipv4_hdr *ip2 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[2],
+ p_rt->params.ip_hdr_offset);
+ struct ipv4_hdr *ip3 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[3],
+ p_rt->params.ip_hdr_offset);
+
+ enum rte_meter_color pkt0_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[0], p_rt->params.color_offset);
+ enum rte_meter_color pkt1_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[1], p_rt->params.color_offset);
+ enum rte_meter_color pkt2_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[2], p_rt->params.color_offset);
+ enum rte_meter_color pkt3_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[3], p_rt->params.color_offset);
+
+ struct pipeline_routing_arp_key_ipv4 *arp_key0 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[0],
+ p_rt->params.arp_key_offset);
+ struct pipeline_routing_arp_key_ipv4 *arp_key1 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[1],
+ p_rt->params.arp_key_offset);
+ struct pipeline_routing_arp_key_ipv4 *arp_key2 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[2],
+ p_rt->params.arp_key_offset);
+ struct pipeline_routing_arp_key_ipv4 *arp_key3 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[3],
+ p_rt->params.arp_key_offset);
+
+ uint64_t *slab0_ptr0, *slab1_ptr0, *slab2_ptr0, *slab3_ptr0;
+ uint64_t *slab0_ptr1, *slab1_ptr1, *slab2_ptr1, *slab3_ptr1;
+ uint64_t *slab0_ptr2, *slab1_ptr2, *slab2_ptr2, *slab3_ptr2;
+ uint64_t *slab0_ptr3, *slab1_ptr3, *slab2_ptr3, *slab3_ptr3;
+ uint64_t sched0, sched1, sched2, sched3;
+
+ uint32_t ip_da0, nh_ip0, port_id0;
+ uint32_t ip_da1, nh_ip1, port_id1;
+ uint32_t ip_da2, nh_ip2, port_id2;
+ uint32_t ip_da3, nh_ip3, port_id3;
+
+ uint16_t total_length0, data_offset0, ether_l2_length0;
+ uint16_t total_length1, data_offset1, ether_l2_length1;
+ uint16_t total_length2, data_offset2, ether_l2_length2;
+ uint16_t total_length3, data_offset3, ether_l2_length3;
+
+ /* Read */
+ total_length0 = rte_bswap16(ip0->total_length);
+ total_length1 = rte_bswap16(ip1->total_length);
+ total_length2 = rte_bswap16(ip2->total_length);
+ total_length3 = rte_bswap16(ip3->total_length);
+
+ ip_da0 = ip0->dst_addr;
+ ip_da1 = ip1->dst_addr;
+ ip_da2 = ip2->dst_addr;
+ ip_da3 = ip3->dst_addr;
+
+ data_offset0 = entry0->data_offset;
+ data_offset1 = entry1->data_offset;
+ data_offset2 = entry2->data_offset;
+ data_offset3 = entry3->data_offset;
+
+ ether_l2_length0 = entry0->ether_l2_length;
+ ether_l2_length1 = entry1->ether_l2_length;
+ ether_l2_length2 = entry2->ether_l2_length;
+ ether_l2_length3 = entry3->ether_l2_length;
+
+ slab0_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[0]);
+ slab1_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[1]);
+ slab2_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[2]);
+ slab3_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[3]);
+
+ slab0_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[0]);
+ slab1_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[1]);
+ slab2_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[2]);
+ slab3_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[3]);
+
+ slab0_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[0]);
+ slab1_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[1]);
+ slab2_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[2]);
+ slab3_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[3]);
+
+ slab0_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[0]);
+ slab1_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[1]);
+ slab2_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[2]);
+ slab3_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[3]);
+
+ if (arp) {
+ port_id0 = entry0->port_id;
+ nh_ip0 = entry0->ip;
+ if (entry0->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip0 = ip_da0;
+
+ port_id1 = entry1->port_id;
+ nh_ip1 = entry1->ip;
+ if (entry1->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip1 = ip_da1;
+
+ port_id2 = entry2->port_id;
+ nh_ip2 = entry2->ip;
+ if (entry2->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip2 = ip_da2;
+
+ port_id3 = entry3->port_id;
+ nh_ip3 = entry3->ip;
+ if (entry3->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip3 = ip_da3;
+ }
+
+ /* Compute */
+ total_length0 += ether_l2_length0;
+ total_length1 += ether_l2_length1;
+ total_length2 += ether_l2_length2;
+ total_length3 += ether_l2_length3;
+
+ if (qinq && qinq_sched) {
+ uint32_t dscp0 = ip0->type_of_service >> 2;
+ uint32_t dscp1 = ip1->type_of_service >> 2;
+ uint32_t dscp2 = ip2->type_of_service >> 2;
+ uint32_t dscp3 = ip3->type_of_service >> 2;
+ uint32_t svlan0, cvlan0, tc0, tc_q0;
+ uint32_t svlan1, cvlan1, tc1, tc_q1;
+ uint32_t svlan2, cvlan2, tc2, tc_q2;
+ uint32_t svlan3, cvlan3, tc3, tc_q3;
+
+ if (qinq_sched == 1) {
+ uint64_t slab_qinq0 = rte_bswap64(entry0->slab[0]);
+ uint64_t slab_qinq1 = rte_bswap64(entry1->slab[0]);
+ uint64_t slab_qinq2 = rte_bswap64(entry2->slab[0]);
+ uint64_t slab_qinq3 = rte_bswap64(entry3->slab[0]);
+
+ svlan0 = (slab_qinq0 >> 48) & 0xFFF;
+ svlan1 = (slab_qinq1 >> 48) & 0xFFF;
+ svlan2 = (slab_qinq2 >> 48) & 0xFFF;
+ svlan3 = (slab_qinq3 >> 48) & 0xFFF;
+
+ cvlan0 = (slab_qinq0 >> 16) & 0xFFF;
+ cvlan1 = (slab_qinq1 >> 16) & 0xFFF;
+ cvlan2 = (slab_qinq2 >> 16) & 0xFFF;
+ cvlan3 = (slab_qinq3 >> 16) & 0xFFF;
+
+ tc0 = (dscp0 >> 2) & 0x3;
+ tc1 = (dscp1 >> 2) & 0x3;
+ tc2 = (dscp2 >> 2) & 0x3;
+ tc3 = (dscp3 >> 2) & 0x3;
+
+ tc_q0 = dscp0 & 0x3;
+ tc_q1 = dscp1 & 0x3;
+ tc_q2 = dscp2 & 0x3;
+ tc_q3 = dscp3 & 0x3;
+ } else {
+ uint32_t ip_src0 = rte_bswap32(ip0->src_addr);
+ uint32_t ip_src1 = rte_bswap32(ip1->src_addr);
+ uint32_t ip_src2 = rte_bswap32(ip2->src_addr);
+ uint32_t ip_src3 = rte_bswap32(ip3->src_addr);
+
+ svlan0 = 0;
+ svlan1 = 0;
+ svlan2 = 0;
+ svlan3 = 0;
+
+ cvlan0 = (ip_src0 >> 16) & 0xFFF;
+ cvlan1 = (ip_src1 >> 16) & 0xFFF;
+ cvlan2 = (ip_src2 >> 16) & 0xFFF;
+ cvlan3 = (ip_src3 >> 16) & 0xFFF;
+
+ tc0 = (ip_src0 >> 2) & 0x3;
+ tc1 = (ip_src1 >> 2) & 0x3;
+ tc2 = (ip_src2 >> 2) & 0x3;
+ tc3 = (ip_src3 >> 2) & 0x3;
+
+ tc_q0 = ip_src0 & 0x3;
+ tc_q1 = ip_src1 & 0x3;
+ tc_q2 = ip_src2 & 0x3;
+ tc_q3 = ip_src3 & 0x3;
+ }
+
+ sched0 = RTE_SCHED_PORT_HIERARCHY(svlan0,
+ cvlan0,
+ tc0,
+ tc_q0,
+ e_RTE_METER_GREEN);
+ sched1 = RTE_SCHED_PORT_HIERARCHY(svlan1,
+ cvlan1,
+ tc1,
+ tc_q1,
+ e_RTE_METER_GREEN);
+ sched2 = RTE_SCHED_PORT_HIERARCHY(svlan2,
+ cvlan2,
+ tc2,
+ tc_q2,
+ e_RTE_METER_GREEN);
+ sched3 = RTE_SCHED_PORT_HIERARCHY(svlan3,
+ cvlan3,
+ tc3,
+ tc_q3,
+ e_RTE_METER_GREEN);
+ }
+
+ /* Write */
+ pkts[0]->data_off = data_offset0;
+ pkts[1]->data_off = data_offset1;
+ pkts[2]->data_off = data_offset2;
+ pkts[3]->data_off = data_offset3;
+
+ pkts[0]->data_len = total_length0;
+ pkts[1]->data_len = total_length1;
+ pkts[2]->data_len = total_length2;
+ pkts[3]->data_len = total_length3;
+
+ pkts[0]->pkt_len = total_length0;
+ pkts[1]->pkt_len = total_length1;
+ pkts[2]->pkt_len = total_length2;
+ pkts[3]->pkt_len = total_length3;
+
+ if ((qinq == 0) && (mpls == 0)) {
+ *slab0_ptr0 = entry0->slab[0];
+ *slab0_ptr1 = entry1->slab[0];
+ *slab0_ptr2 = entry2->slab[0];
+ *slab0_ptr3 = entry3->slab[0];
+
+ if (arp == 0) {
+ MACADDR_DST_WRITE(slab1_ptr0, entry0->slab[1]);
+ MACADDR_DST_WRITE(slab1_ptr1, entry1->slab[1]);
+ MACADDR_DST_WRITE(slab1_ptr2, entry2->slab[1]);
+ MACADDR_DST_WRITE(slab1_ptr3, entry3->slab[1]);
+ }
+ }
+
+ if (qinq) {
+ *slab0_ptr0 = entry0->slab[0];
+ *slab0_ptr1 = entry1->slab[0];
+ *slab0_ptr2 = entry2->slab[0];
+ *slab0_ptr3 = entry3->slab[0];
+
+ *slab1_ptr0 = entry0->slab[1];
+ *slab1_ptr1 = entry1->slab[1];
+ *slab1_ptr2 = entry2->slab[1];
+ *slab1_ptr3 = entry3->slab[1];
+
+ if (arp == 0) {
+ MACADDR_DST_WRITE(slab2_ptr0, entry0->slab[2]);
+ MACADDR_DST_WRITE(slab2_ptr1, entry1->slab[2]);
+ MACADDR_DST_WRITE(slab2_ptr2, entry2->slab[2]);
+ MACADDR_DST_WRITE(slab2_ptr3, entry3->slab[2]);
+ }
+
+ if (qinq_sched) {
+ pkts[0]->hash.sched.lo = sched0 & 0xFFFFFFFF;
+ pkts[0]->hash.sched.hi = sched0 >> 32;
+ pkts[1]->hash.sched.lo = sched1 & 0xFFFFFFFF;
+ pkts[1]->hash.sched.hi = sched1 >> 32;
+ pkts[2]->hash.sched.lo = sched2 & 0xFFFFFFFF;
+ pkts[2]->hash.sched.hi = sched2 >> 32;
+ pkts[3]->hash.sched.lo = sched3 & 0xFFFFFFFF;
+ pkts[3]->hash.sched.hi = sched3 >> 32;
+ }
+ }
+
+ if (mpls) {
+ if (mpls_color_mark) {
+ uint64_t mpls_exp0 = rte_bswap64(
+ (MPLS_LABEL(0, pkt0_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt0_color, 0, 0));
+ uint64_t mpls_exp1 = rte_bswap64(
+ (MPLS_LABEL(0, pkt1_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt1_color, 0, 0));
+ uint64_t mpls_exp2 = rte_bswap64(
+ (MPLS_LABEL(0, pkt2_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt2_color, 0, 0));
+ uint64_t mpls_exp3 = rte_bswap64(
+ (MPLS_LABEL(0, pkt3_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt3_color, 0, 0));
+
+ *slab0_ptr0 = entry0->slab[0] | mpls_exp0;
+ *slab0_ptr1 = entry1->slab[0] | mpls_exp1;
+ *slab0_ptr2 = entry2->slab[0] | mpls_exp2;
+ *slab0_ptr3 = entry3->slab[0] | mpls_exp3;
+
+ *slab1_ptr0 = entry0->slab[1] | mpls_exp0;
+ *slab1_ptr1 = entry1->slab[1] | mpls_exp1;
+ *slab1_ptr2 = entry2->slab[1] | mpls_exp2;
+ *slab1_ptr3 = entry3->slab[1] | mpls_exp3;
+
+ *slab2_ptr0 = entry0->slab[2];
+ *slab2_ptr1 = entry1->slab[2];
+ *slab2_ptr2 = entry2->slab[2];
+ *slab2_ptr3 = entry3->slab[2];
+ } else {
+ *slab0_ptr0 = entry0->slab[0];
+ *slab0_ptr1 = entry1->slab[0];
+ *slab0_ptr2 = entry2->slab[0];
+ *slab0_ptr3 = entry3->slab[0];
+
+ *slab1_ptr0 = entry0->slab[1];
+ *slab1_ptr1 = entry1->slab[1];
+ *slab1_ptr2 = entry2->slab[1];
+ *slab1_ptr3 = entry3->slab[1];
+
+ *slab2_ptr0 = entry0->slab[2];
+ *slab2_ptr1 = entry1->slab[2];
+ *slab2_ptr2 = entry2->slab[2];
+ *slab2_ptr3 = entry3->slab[2];
+ }
+
+ if (arp == 0) {
+ MACADDR_DST_WRITE(slab3_ptr0, entry0->slab[3]);
+ MACADDR_DST_WRITE(slab3_ptr1, entry1->slab[3]);
+ MACADDR_DST_WRITE(slab3_ptr2, entry2->slab[3]);
+ MACADDR_DST_WRITE(slab3_ptr3, entry3->slab[3]);
+ }
+ }
+
+ if (arp) {
+ arp_key0->port_id = port_id0;
+ arp_key1->port_id = port_id1;
+ arp_key2->port_id = port_id2;
+ arp_key3->port_id = port_id3;
+
+ arp_key0->ip = nh_ip0;
+ arp_key1->ip = nh_ip1;
+ arp_key2->ip = nh_ip2;
+ arp_key3->ip = nh_ip3;
+ }
+}
+
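+/*
+ * The macros below instantiate one specialized table action handler per
+ * (encapsulation, QinQ scheduling mode, MPLS color marking, ARP on/off)
+ * combination, wiring the single-packet and four-packet work functions
+ * together through PIPELINE_TABLE_AH_HIT.
+ */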
+#define PKT_WORK_ROUTING_ETHERNET(arp) \
+static inline void \
+pkt_work_routing_ether_arp##arp( \
+ struct rte_mbuf *pkt, \
+ struct rte_pipeline_table_entry *table_entry, \
+ void *arg) \
+{ \
+ pkt_work_routing(pkt, table_entry, arg, arp, 0, 0, 0, 0);\
+}
+
+#define PKT4_WORK_ROUTING_ETHERNET(arp) \
+static inline void \
+pkt4_work_routing_ether_arp##arp( \
+ struct rte_mbuf **pkts, \
+ struct rte_pipeline_table_entry **table_entries, \
+ void *arg) \
+{ \
+ pkt4_work_routing(pkts, table_entries, arg, arp, 0, 0, 0, 0);\
+}
+
+#define routing_table_ah_hit_ether(arp) \
+PKT_WORK_ROUTING_ETHERNET(arp) \
+PKT4_WORK_ROUTING_ETHERNET(arp) \
+PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_arp##arp, \
+ pkt_work_routing_ether_arp##arp, \
+ pkt4_work_routing_ether_arp##arp)
+
+routing_table_ah_hit_ether(0)
+routing_table_ah_hit_ether(1)
+
+#define PKT_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+static inline void \
+pkt_work_routing_ether_qinq_sched##sched##_arp##arp( \
+ struct rte_mbuf *pkt, \
+ struct rte_pipeline_table_entry *table_entry, \
+ void *arg) \
+{ \
+ pkt_work_routing(pkt, table_entry, arg, arp, 1, sched, 0, 0);\
+}
+
+#define PKT4_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+static inline void \
+pkt4_work_routing_ether_qinq_sched##sched##_arp##arp( \
+ struct rte_mbuf **pkts, \
+ struct rte_pipeline_table_entry **table_entries, \
+ void *arg) \
+{ \
+ pkt4_work_routing(pkts, table_entries, arg, arp, 1, sched, 0, 0);\
+}
+
+#define routing_table_ah_hit_ether_qinq(sched, arp) \
+PKT_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+PKT4_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_qinq_sched##sched##_arp##arp,\
+ pkt_work_routing_ether_qinq_sched##sched##_arp##arp, \
+ pkt4_work_routing_ether_qinq_sched##sched##_arp##arp)
+
+routing_table_ah_hit_ether_qinq(0, 0)
+routing_table_ah_hit_ether_qinq(1, 0)
+routing_table_ah_hit_ether_qinq(2, 0)
+routing_table_ah_hit_ether_qinq(0, 1)
+routing_table_ah_hit_ether_qinq(1, 1)
+routing_table_ah_hit_ether_qinq(2, 1)
+
+#define PKT_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+static inline void \
+pkt_work_routing_ether_mpls_color##color##_arp##arp( \
+ struct rte_mbuf *pkt, \
+ struct rte_pipeline_table_entry *table_entry, \
+ void *arg) \
+{ \
+ pkt_work_routing(pkt, table_entry, arg, arp, 0, 0, 1, color);\
+}
+
+#define PKT4_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+static inline void \
+pkt4_work_routing_ether_mpls_color##color##_arp##arp( \
+ struct rte_mbuf **pkts, \
+ struct rte_pipeline_table_entry **table_entries, \
+ void *arg) \
+{ \
+ pkt4_work_routing(pkts, table_entries, arg, arp, 0, 0, 1, color);\
+}
+
+#define routing_table_ah_hit_ether_mpls(color, arp) \
+PKT_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+PKT4_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_mpls_color##color##_arp##arp,\
+ pkt_work_routing_ether_mpls_color##color##_arp##arp, \
+ pkt4_work_routing_ether_mpls_color##color##_arp##arp)
+
+routing_table_ah_hit_ether_mpls(0, 0)
+routing_table_ah_hit_ether_mpls(1, 0)
+routing_table_ah_hit_ether_mpls(0, 1)
+routing_table_ah_hit_ether_mpls(1, 1)
+
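+/*
+ * Select the routing table hit action handler matching the configured
+ * encapsulation and ARP setting; return NULL (no action handler) when
+ * dbg_ah_disable is set.
+ */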
+static rte_pipeline_table_action_handler_hit
+get_routing_table_ah_hit(struct pipeline_routing *p)
+{
+ if (p->params.dbg_ah_disable)
+ return NULL;
+
+ switch (p->params.encap) {
+ case PIPELINE_ROUTING_ENCAP_ETHERNET:
+ return (p->params.n_arp_entries) ?
+ routing_table_ah_hit_ether_arp1 :
+ routing_table_ah_hit_ether_arp0;
+
+ case PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ:
+ if (p->params.n_arp_entries)
+ switch (p->params.qinq_sched) {
+ case 0:
+ return routing_table_ah_hit_ether_qinq_sched0_arp1;
+ case 1:
+ return routing_table_ah_hit_ether_qinq_sched1_arp1;
+ case 2:
+ return routing_table_ah_hit_ether_qinq_sched2_arp1;
+ default:
+ return NULL;
+ }
+ else
+ switch (p->params.qinq_sched) {
+ case 0:
+ return routing_table_ah_hit_ether_qinq_sched0_arp0;
+ case 1:
+ return routing_table_ah_hit_ether_qinq_sched1_arp0;
+ case 2:
+ return routing_table_ah_hit_ether_qinq_sched2_arp0;
+ default:
+ return NULL;
+ }
+
+ case PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS:
+		if (p->params.n_arp_entries) {
+			if (p->params.mpls_color_mark)
+				return routing_table_ah_hit_ether_mpls_color1_arp1;
+			else
+				return routing_table_ah_hit_ether_mpls_color0_arp1;
+		} else {
+			if (p->params.mpls_color_mark)
+				return routing_table_ah_hit_ether_mpls_color1_arp0;
+			else
+				return routing_table_ah_hit_ether_mpls_color0_arp0;
+		}
+
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * ARP table
+ */
+struct arp_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint64_t macaddr;
+};
+
+/**
+ * ARP table AH
+ */
+static inline void
+pkt_work_arp(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ __rte_unused void *arg)
+{
+ struct arp_table_entry *entry = (struct arp_table_entry *) table_entry;
+
+ /* Read */
+ uint64_t macaddr_dst = entry->macaddr;
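+	/* Point 2 bytes before the start of the frame so that the 6-byte
+	 * destination MAC written by MACADDR_DST_WRITE() lands at the very
+	 * beginning of the Ethernet header.
+	 */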
+ uint64_t *slab_ptr = (uint64_t *) ((char *) pkt->buf_addr +
+ (pkt->data_off - 2));
+
+ /* Compute */
+
+ /* Write */
+ MACADDR_DST_WRITE(slab_ptr, macaddr_dst);
+}
+
+static inline void
+pkt4_work_arp(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ __rte_unused void *arg)
+{
+ struct arp_table_entry *entry0 =
+ (struct arp_table_entry *) table_entries[0];
+ struct arp_table_entry *entry1 =
+ (struct arp_table_entry *) table_entries[1];
+ struct arp_table_entry *entry2 =
+ (struct arp_table_entry *) table_entries[2];
+ struct arp_table_entry *entry3 =
+ (struct arp_table_entry *) table_entries[3];
+
+ /* Read */
+ uint64_t macaddr_dst0 = entry0->macaddr;
+ uint64_t macaddr_dst1 = entry1->macaddr;
+ uint64_t macaddr_dst2 = entry2->macaddr;
+ uint64_t macaddr_dst3 = entry3->macaddr;
+
+ uint64_t *slab_ptr0 = (uint64_t *) ((char *) pkts[0]->buf_addr +
+ (pkts[0]->data_off - 2));
+ uint64_t *slab_ptr1 = (uint64_t *) ((char *) pkts[1]->buf_addr +
+ (pkts[1]->data_off - 2));
+ uint64_t *slab_ptr2 = (uint64_t *) ((char *) pkts[2]->buf_addr +
+ (pkts[2]->data_off - 2));
+ uint64_t *slab_ptr3 = (uint64_t *) ((char *) pkts[3]->buf_addr +
+ (pkts[3]->data_off - 2));
+
+ /* Compute */
+
+ /* Write */
+ MACADDR_DST_WRITE(slab_ptr0, macaddr_dst0);
+ MACADDR_DST_WRITE(slab_ptr1, macaddr_dst1);
+ MACADDR_DST_WRITE(slab_ptr2, macaddr_dst2);
+ MACADDR_DST_WRITE(slab_ptr3, macaddr_dst3);
+}
+
+PIPELINE_TABLE_AH_HIT(arp_table_ah_hit,
+ pkt_work_arp,
+ pkt4_work_arp);
+
+static rte_pipeline_table_action_handler_hit
+get_arp_table_ah_hit(struct pipeline_routing *p)
+{
+ if (p->params.dbg_ah_disable)
+ return NULL;
+
+ return arp_table_ah_hit;
+}
+
+/*
+ * Argument parsing
+ */
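+/*
+ * Fill the parameter block with defaults, then validate every
+ * "name = value" entry of the pipeline section, rejecting duplicates,
+ * invalid values and out-of-range values. Mandatory arguments and
+ * inter-argument constraints are checked at the end.
+ */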
+int
+pipeline_routing_parse_args(struct pipeline_routing_params *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_routes_present = 0;
+ uint32_t encap_present = 0;
+ uint32_t qinq_sched_present = 0;
+ uint32_t mpls_color_mark_present = 0;
+ uint32_t n_arp_entries_present = 0;
+ uint32_t ip_hdr_offset_present = 0;
+ uint32_t arp_key_offset_present = 0;
+ uint32_t color_offset_present = 0;
+ uint32_t dbg_ah_disable_present = 0;
+ uint32_t i;
+
+ /* default values */
+ p->n_routes = PIPELINE_ROUTING_N_ROUTES_DEFAULT;
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
+ p->qinq_sched = 0;
+ p->mpls_color_mark = 0;
+ p->n_arp_entries = 0;
+ p->dbg_ah_disable = 0;
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* n_routes */
+ if (strcmp(arg_name, "n_routes") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_routes_present == 0, params->name,
+ arg_name);
+ n_routes_present = 1;
+
+ status = parser_read_uint32(&p->n_routes,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_routes != 0)), params->name,
+ arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* encap */
+ if (strcmp(arg_name, "encap") == 0) {
+ PIPELINE_PARSE_ERR_DUPLICATE(encap_present == 0,
+ params->name, arg_name);
+ encap_present = 1;
+
+ /* ethernet */
+ if (strcmp(arg_value, "ethernet") == 0) {
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
+ continue;
+ }
+
+ /* ethernet_qinq */
+ if (strcmp(arg_value, "ethernet_qinq") == 0) {
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ;
+ continue;
+ }
+
+ /* ethernet_mpls */
+ if (strcmp(arg_value, "ethernet_mpls") == 0) {
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS;
+ continue;
+ }
+
+ /* any other */
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* qinq_sched */
+ if (strcmp(arg_name, "qinq_sched") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ qinq_sched_present == 0, params->name,
+ arg_name);
+ qinq_sched_present = 1;
+
+ status = parser_read_arg_bool(arg_value);
+ if (status == -EINVAL) {
+ if (strcmp(arg_value, "test") == 0) {
+ p->qinq_sched = 2;
+ continue;
+ }
+ } else {
+ p->qinq_sched = status;
+ continue;
+ }
+
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* mpls_color_mark */
+ if (strcmp(arg_name, "mpls_color_mark") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ mpls_color_mark_present == 0,
+ params->name, arg_name);
+ mpls_color_mark_present = 1;
+
+ status = parser_read_arg_bool(arg_value);
+ if (status >= 0) {
+ p->mpls_color_mark = status;
+ continue;
+ }
+
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* n_arp_entries */
+ if (strcmp(arg_name, "n_arp_entries") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_arp_entries_present == 0, params->name,
+ arg_name);
+ n_arp_entries_present = 1;
+
+ status = parser_read_uint32(&p->n_arp_entries,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* ip_hdr_offset */
+ if (strcmp(arg_name, "ip_hdr_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ ip_hdr_offset_present == 0, params->name,
+ arg_name);
+ ip_hdr_offset_present = 1;
+
+ status = parser_read_uint32(&p->ip_hdr_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* arp_key_offset */
+ if (strcmp(arg_name, "arp_key_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ arp_key_offset_present == 0, params->name,
+ arg_name);
+ arp_key_offset_present = 1;
+
+ status = parser_read_uint32(&p->arp_key_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* color_offset */
+ if (strcmp(arg_name, "color_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ color_offset_present == 0, params->name,
+ arg_name);
+ color_offset_present = 1;
+
+ status = parser_read_uint32(&p->color_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* debug */
+ if (strcmp(arg_name, "dbg_ah_disable") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dbg_ah_disable_present == 0, params->name,
+ arg_name);
+ dbg_ah_disable_present = 1;
+
+ status = parser_read_arg_bool(arg_value);
+ if (status >= 0) {
+ p->dbg_ah_disable = status;
+ continue;
+ }
+
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+
+ continue;
+ }
+
+ /* any other */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check that mandatory arguments are present */
+ PIPELINE_PARSE_ERR_MANDATORY(ip_hdr_offset_present, params->name,
+ "ip_hdr_offset");
+
+ /* Check relations between arguments */
+ switch (p->encap) {
+ case PIPELINE_ROUTING_ENCAP_ETHERNET:
+ PIPELINE_ARG_CHECK((!p->qinq_sched), "Parse error in "
+ "section \"%s\": encap = ethernet, therefore "
+ "qinq_sched = yes/test is not allowed",
+ params->name);
+ PIPELINE_ARG_CHECK((!p->mpls_color_mark), "Parse error "
+ "in section \"%s\": encap = ethernet, therefore "
+ "mpls_color_mark = yes is not allowed",
+ params->name);
+ PIPELINE_ARG_CHECK((!color_offset_present), "Parse error "
+ "in section \"%s\": encap = ethernet, therefore "
+ "color_offset is not allowed",
+ params->name);
+ break;
+
+ case PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ:
+ PIPELINE_ARG_CHECK((!p->mpls_color_mark), "Parse error "
+ "in section \"%s\": encap = ethernet_qinq, "
+ "therefore mpls_color_mark = yes is not allowed",
+ params->name);
+ PIPELINE_ARG_CHECK((!color_offset_present), "Parse error "
+ "in section \"%s\": encap = ethernet_qinq, "
+ "therefore color_offset is not allowed",
+ params->name);
+ break;
+
+ case PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS:
+ PIPELINE_ARG_CHECK((!p->qinq_sched), "Parse error in "
+ "section \"%s\": encap = ethernet_mpls, therefore "
+ "qinq_sched = yes/test is not allowed",
+ params->name);
+ break;
+ }
+
+ PIPELINE_ARG_CHECK((!(p->n_arp_entries &&
+ (!arp_key_offset_present))), "Parse error in section "
+ "\"%s\": n_arp_entries is set while "
+ "arp_key_offset is not set", params->name);
+
+ PIPELINE_ARG_CHECK((!((p->n_arp_entries == 0) &&
+ arp_key_offset_present)), "Parse error in section "
+ "\"%s\": arp_key_offset present while "
+ "n_arp_entries is not set", params->name);
+
+ return 0;
+}
+
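+/*
+ * Back-end init: allocate the pipeline state, create the rte_pipeline
+ * object, its input/output ports, the LPM routing table (table 0) and,
+ * when n_arp_entries is non-zero, the hash-based ARP table (table 1),
+ * then connect and enable the input ports.
+ */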
+static void *
+pipeline_routing_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_routing *p_rt;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_rt = (struct pipeline_routing *) p;
+ if (p == NULL)
+ return NULL;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Routing");
+
+	/* Parse arguments */
+	if (pipeline_routing_parse_args(&p_rt->params, params)) {
+		rte_free(p);
+		return NULL;
+	}
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Routing table */
+ p->n_tables = 1;
+ {
+ struct rte_table_lpm_params table_lpm_params = {
+ .name = p->name,
+ .n_rules = p_rt->params.n_routes,
+ .number_tbl8s = PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s,
+ .flags = 0,
+ .entry_unique_size = sizeof(struct routing_table_entry),
+ .offset = p_rt->params.ip_hdr_offset +
+ __builtin_offsetof(struct ipv4_hdr, dst_addr),
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_lpm_ops,
+ .arg_create = &table_lpm_params,
+ .f_action_hit = get_routing_table_ah_hit(p_rt),
+ .f_action_miss = NULL,
+ .arg_ah = p_rt,
+ .action_data_size =
+ sizeof(struct routing_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* ARP table configuration */
+ if (p_rt->params.n_arp_entries) {
+ struct rte_table_hash_key8_ext_params table_arp_params = {
+ .n_entries = p_rt->params.n_arp_entries,
+ .n_entries_ext = p_rt->params.n_arp_entries,
+ .f_hash = hash_default_key8,
+ .seed = 0,
+ .signature_offset = 0, /* Unused */
+ .key_offset = p_rt->params.arp_key_offset,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key8_ext_dosig_ops,
+ .arg_create = &table_arp_params,
+ .f_action_hit = get_arp_table_ah_hit(p_rt),
+ .f_action_miss = NULL,
+ .arg_ah = p_rt,
+ .action_data_size = sizeof(struct arp_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[1]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ p->n_tables++;
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_rt->custom_handlers,
+ custom_handlers,
+ sizeof(p_rt->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_routing_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_routing_track(void *pipeline,
+	uint32_t port_in,
+ uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_in == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int
+pipeline_routing_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
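+/*
+ * Dispatch a CUSTOM request to the routing-specific handler selected by
+ * the message subtype; unknown subtypes fall back to the invalid-request
+ * handler.
+ */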
+void *
+pipeline_routing_msg_req_custom_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_ROUTING_MSG_REQS) ?
+ p_rt->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
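+/*
+ * Route add: check that the request flags are consistent with the
+ * configured encapsulation and ARP mode, precompute the L2 rewrite slabs
+ * for the selected encapsulation, then add the rule to the LPM table.
+ * The response is written in place over the request message.
+ */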
+void *
+pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
+ struct pipeline_routing_route_add_msg_req *req = msg;
+ struct pipeline_routing_route_add_msg_rsp *rsp = msg;
+
+ struct rte_table_lpm_key key = {
+ .ip = req->key.key.ipv4.ip,
+ .depth = req->key.key.ipv4.depth,
+ };
+
+ struct routing_table_entry entry_arp0 = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->data.port_id]},
+ },
+
+ .flags = req->data.flags,
+ .port_id = req->data.port_id,
+ .ip = 0,
+ .data_offset = 0,
+ .ether_l2_length = 0,
+ .slab = {0},
+ .slab_offset = {0},
+ };
+
+ struct routing_table_entry entry_arp1 = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_TABLE,
+ {.table_id = p->table_id[1]},
+ },
+
+ .flags = req->data.flags,
+ .port_id = req->data.port_id,
+ .ip = rte_bswap32(req->data.ethernet.ip),
+ .data_offset = 0,
+ .ether_l2_length = 0,
+ .slab = {0},
+ .slab_offset = {0},
+ };
+
+ struct rte_pipeline_table_entry *entry = (p_rt->params.n_arp_entries) ?
+ (struct rte_pipeline_table_entry *) &entry_arp1 :
+ (struct rte_pipeline_table_entry *) &entry_arp0;
+
+ if ((req->key.type != PIPELINE_ROUTING_ROUTE_IPV4) ||
+ ((p_rt->params.n_arp_entries == 0) &&
+ (req->data.flags & PIPELINE_ROUTING_ROUTE_ARP)) ||
+ (p_rt->params.n_arp_entries &&
+ ((req->data.flags & PIPELINE_ROUTING_ROUTE_ARP) == 0)) ||
+ ((p_rt->params.encap != PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ (req->data.flags & PIPELINE_ROUTING_ROUTE_QINQ)) ||
+ ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ ((req->data.flags & PIPELINE_ROUTING_ROUTE_QINQ) == 0)) ||
+ ((p_rt->params.encap != PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ (req->data.flags & PIPELINE_ROUTING_ROUTE_MPLS)) ||
+ ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ ((req->data.flags & PIPELINE_ROUTING_ROUTE_MPLS) == 0))) {
+ rsp->status = -1;
+ return rsp;
+ }
+
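+	/*
+	 * The blocks below precompute, per encapsulation, up to four 8-byte
+	 * slabs holding the new L2 header (MAC addresses, QinQ tags or MPLS
+	 * labels, EtherType), the mbuf offsets where they are written per
+	 * packet, the new frame start (data_offset) and the L2 header length
+	 * added to the IP total length.
+	 */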
+ /* Ether - ARP off */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
+ (p_rt->params.n_arp_entries == 0)) {
+ uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_dst;
+ uint64_t ethertype = ETHER_TYPE_IPv4;
+
+ macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
+ macaddr_dst = rte_bswap64(macaddr_dst << 16);
+
+ entry_arp0.slab[0] =
+ rte_bswap64((macaddr_src << 16) | ethertype);
+ entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(macaddr_dst);
+ entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
+
+ entry_arp0.data_offset = entry_arp0.slab_offset[1] + 2
+ - sizeof(struct rte_mbuf);
+ entry_arp0.ether_l2_length = 14;
+ }
+
+ /* Ether - ARP on */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
+ p_rt->params.n_arp_entries) {
+ uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t ethertype = ETHER_TYPE_IPv4;
+
+ entry_arp1.slab[0] = rte_bswap64((macaddr_src << 16) |
+ ethertype);
+ entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.data_offset = entry_arp1.slab_offset[0] - 6
+ - sizeof(struct rte_mbuf);
+ entry_arp1.ether_l2_length = 14;
+ }
+
+ /* Ether QinQ - ARP off */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ (p_rt->params.n_arp_entries == 0)) {
+ uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_dst;
+ uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
+ uint64_t ethertype_vlan = 0x8100;
+ uint64_t ethertype_qinq = 0x9100;
+ uint64_t svlan = req->data.l2.qinq.svlan;
+ uint64_t cvlan = req->data.l2.qinq.cvlan;
+
+ macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
+ macaddr_dst = rte_bswap64(macaddr_dst << 16);
+
+ entry_arp0.slab[0] = rte_bswap64((svlan << 48) |
+ (ethertype_vlan << 32) |
+ (cvlan << 16) |
+ ethertype_ipv4);
+ entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64((macaddr_src << 16) |
+ ethertype_qinq);
+ entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
+
+ entry_arp0.slab[2] = rte_bswap64(macaddr_dst);
+ entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset - 3 * 8;
+
+ entry_arp0.data_offset = entry_arp0.slab_offset[2] + 2
+ - sizeof(struct rte_mbuf);
+ entry_arp0.ether_l2_length = 22;
+ }
+
+ /* Ether QinQ - ARP on */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ p_rt->params.n_arp_entries) {
+ uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
+ uint64_t ethertype_vlan = 0x8100;
+ uint64_t ethertype_qinq = 0x9100;
+ uint64_t svlan = req->data.l2.qinq.svlan;
+ uint64_t cvlan = req->data.l2.qinq.cvlan;
+
+ entry_arp1.slab[0] = rte_bswap64((svlan << 48) |
+ (ethertype_vlan << 32) |
+ (cvlan << 16) |
+ ethertype_ipv4);
+ entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64((macaddr_src << 16) |
+ ethertype_qinq);
+ entry_arp1.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
+
+ entry_arp1.data_offset = entry_arp1.slab_offset[1] - 6
+ - sizeof(struct rte_mbuf);
+ entry_arp1.ether_l2_length = 22;
+ }
+
+ /* Ether MPLS - ARP off */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ (p_rt->params.n_arp_entries == 0)) {
+ uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_dst;
+ uint64_t ethertype_mpls = 0x8847;
+
+ uint64_t label0 = req->data.l2.mpls.labels[0];
+ uint64_t label1 = req->data.l2.mpls.labels[1];
+ uint64_t label2 = req->data.l2.mpls.labels[2];
+ uint64_t label3 = req->data.l2.mpls.labels[3];
+ uint32_t n_labels = req->data.l2.mpls.n_labels;
+
+ macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
+ macaddr_dst = rte_bswap64(macaddr_dst << 16);
+
+ switch (n_labels) {
+ case 1:
+ entry_arp0.slab[0] = 0;
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 1, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 2:
+ entry_arp0.slab[0] = 0;
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 1, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 3:
+ entry_arp0.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label1, 0, 0, 0) << 32) |
+ MPLS_LABEL(label2, 0, 1, 0));
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 0, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ case 4:
+ entry_arp0.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label2, 0, 0, 0) << 32) |
+ MPLS_LABEL(label3, 0, 1, 0));
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 0, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ default:
+ rsp->status = -1;
+ return rsp;
+ }
+
+ entry_arp0.slab[2] = rte_bswap64((macaddr_src << 16) |
+ ethertype_mpls);
+ entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset -
+ (n_labels * 4 + 8);
+
+ entry_arp0.slab[3] = rte_bswap64(macaddr_dst);
+ entry_arp0.slab_offset[3] = p_rt->params.ip_hdr_offset -
+ (n_labels * 4 + 2 * 8);
+
+ entry_arp0.data_offset = entry_arp0.slab_offset[3] + 2
+ - sizeof(struct rte_mbuf);
+ entry_arp0.ether_l2_length = n_labels * 4 + 14;
+ }
+
+ /* Ether MPLS - ARP on */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ p_rt->params.n_arp_entries) {
+ uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t ethertype_mpls = 0x8847;
+
+ uint64_t label0 = req->data.l2.mpls.labels[0];
+ uint64_t label1 = req->data.l2.mpls.labels[1];
+ uint64_t label2 = req->data.l2.mpls.labels[2];
+ uint64_t label3 = req->data.l2.mpls.labels[3];
+ uint32_t n_labels = req->data.l2.mpls.n_labels;
+
+ switch (n_labels) {
+ case 1:
+ entry_arp1.slab[0] = 0;
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 1, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 2:
+ entry_arp1.slab[0] = 0;
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 1, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 3:
+ entry_arp1.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label1, 0, 0, 0) << 32) |
+ MPLS_LABEL(label2, 0, 1, 0));
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 0, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ case 4:
+ entry_arp1.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label2, 0, 0, 0) << 32) |
+ MPLS_LABEL(label3, 0, 1, 0));
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 0, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ default:
+ rsp->status = -1;
+ return rsp;
+ }
+
+ entry_arp1.slab[2] = rte_bswap64((macaddr_src << 16) |
+ ethertype_mpls);
+ entry_arp1.slab_offset[2] = p_rt->params.ip_hdr_offset -
+ (n_labels * 4 + 8);
+
+ entry_arp1.data_offset = entry_arp1.slab_offset[2] - 6
+ - sizeof(struct rte_mbuf);
+ entry_arp1.ether_l2_length = n_labels * 4 + 14;
+ }
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_route_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_route_delete_msg_req *req = msg;
+ struct pipeline_routing_route_delete_msg_rsp *rsp = msg;
+
+ struct rte_table_lpm_key key = {
+ .ip = req->key.key.ipv4.ip,
+ .depth = req->key.key.ipv4.depth,
+ };
+
+ if (req->key.type != PIPELINE_ROUTING_ROUTE_IPV4) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[0],
+ &key,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_route_add_default_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_routing_route_add_default_msg_req *req = msg;
+ struct pipeline_routing_route_add_default_msg_rsp *rsp = msg;
+
+ struct routing_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+
+ .flags = 0,
+ .port_id = 0,
+ .ip = 0,
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[0],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_route_del_default_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_routing_route_delete_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[0],
+ NULL);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_add_msg_req *req = msg;
+ struct pipeline_routing_arp_add_msg_rsp *rsp = msg;
+
+ struct pipeline_routing_arp_key_ipv4 key = {
+ .port_id = req->key.key.ipv4.port_id,
+ .ip = rte_bswap32(req->key.key.ipv4.ip),
+ };
+
+ struct arp_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->key.key.ipv4.port_id]},
+ },
+
+ .macaddr = 0, /* set below */
+ };
+
+ if (req->key.type != PIPELINE_ROUTING_ARP_IPV4) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ entry.macaddr = *((uint64_t *)&(req->macaddr));
+ entry.macaddr = entry.macaddr << 16;
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[1],
+ &key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_delete_msg_req *req = msg;
+ struct pipeline_routing_arp_delete_msg_rsp *rsp = msg;
+
+ struct pipeline_routing_arp_key_ipv4 key = {
+ .port_id = req->key.key.ipv4.port_id,
+ .ip = rte_bswap32(req->key.key.ipv4.ip),
+ };
+
+ if (req->key.type != PIPELINE_ROUTING_ARP_IPV4) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[1],
+ &key,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_add_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_add_default_msg_req *req = msg;
+ struct pipeline_routing_arp_add_default_msg_rsp *rsp = msg;
+
+ struct arp_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+
+ .macaddr = 0,
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[1],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_delete_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[1],
+ NULL);
+
+ return rsp;
+}
+
+struct pipeline_be_ops pipeline_routing_be_ops = {
+ .f_init = pipeline_routing_init,
+ .f_free = pipeline_routing_free,
+ .f_run = NULL,
+ .f_timer = pipeline_routing_timer,
+ .f_track = pipeline_routing_track,
+};
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.h b/examples/ip_pipeline/pipeline/pipeline_routing_be.h
new file mode 100644
index 00000000..ec767b24
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.h
@@ -0,0 +1,296 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_ROUTING_BE_H__
+#define __INCLUDE_PIPELINE_ROUTING_BE_H__
+
+#include <rte_ether.h>
+
+#include "pipeline_common_be.h"
+
+/*
+ * Pipeline argument parsing
+ */
+#ifndef PIPELINE_ROUTING_N_ROUTES_DEFAULT
+#define PIPELINE_ROUTING_N_ROUTES_DEFAULT 4096
+#endif
+
+enum pipeline_routing_encap {
+ PIPELINE_ROUTING_ENCAP_ETHERNET = 0,
+ PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ,
+ PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS,
+};
+
+struct pipeline_routing_params {
+ /* routing */
+ uint32_t n_routes;
+
+ /* routing packet encapsulation */
+ enum pipeline_routing_encap encap;
+ uint32_t qinq_sched;
+ uint32_t mpls_color_mark;
+
+ /* arp */
+ uint32_t n_arp_entries;
+
+ /* packet buffer offsets */
+ uint32_t ip_hdr_offset;
+ uint32_t arp_key_offset;
+ uint32_t color_offset;
+
+ /* debug */
+ uint32_t dbg_ah_disable;
+};
+
+int
+pipeline_routing_parse_args(struct pipeline_routing_params *p,
+ struct pipeline_params *params);
+
+/*
+ * Route
+ */
+enum pipeline_routing_route_key_type {
+ PIPELINE_ROUTING_ROUTE_IPV4,
+};
+
+struct pipeline_routing_route_key_ipv4 {
+ uint32_t ip;
+ uint32_t depth;
+};
+
+struct pipeline_routing_route_key {
+ enum pipeline_routing_route_key_type type;
+ union {
+ struct pipeline_routing_route_key_ipv4 ipv4;
+ } key;
+};
+
+enum pipeline_routing_route_flags {
+ PIPELINE_ROUTING_ROUTE_LOCAL = 1 << 0, /* 0 = remote; 1 = local */
+ PIPELINE_ROUTING_ROUTE_ARP = 1 << 1, /* 0 = ARP OFF; 1 = ARP ON */
+ PIPELINE_ROUTING_ROUTE_QINQ = 1 << 2, /* 0 = QINQ OFF; 1 = QINQ ON */
+ PIPELINE_ROUTING_ROUTE_MPLS = 1 << 3, /* 0 = MPLS OFF; 1 = MPLS ON */
+};
+
+#define PIPELINE_ROUTING_MPLS_LABELS_MAX 4
+
+struct pipeline_routing_route_data {
+ uint32_t flags;
+ uint32_t port_id; /* Output port ID */
+
+ union {
+ /* Next hop IP (valid only when ARP is enabled) */
+ uint32_t ip;
+
+		/* Next hop MAC address (valid only when ARP is disabled) */
+ struct ether_addr macaddr;
+ } ethernet;
+
+ union {
+ struct {
+ uint16_t svlan;
+ uint16_t cvlan;
+ } qinq;
+
+ struct {
+ uint32_t labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
+ uint32_t n_labels;
+ } mpls;
+ } l2;
+};
+
+/*
+ * ARP
+ */
+enum pipeline_routing_arp_key_type {
+ PIPELINE_ROUTING_ARP_IPV4,
+};
+
+struct pipeline_routing_arp_key_ipv4 {
+ uint32_t port_id;
+ uint32_t ip;
+};
+
+struct pipeline_routing_arp_key {
+ enum pipeline_routing_arp_key_type type;
+ union {
+ struct pipeline_routing_arp_key_ipv4 ipv4;
+ } key;
+};
+
+/*
+ * Messages
+ */
+enum pipeline_routing_msg_req_type {
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD,
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL,
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_ARP_ADD,
+ PIPELINE_ROUTING_MSG_REQ_ARP_DEL,
+ PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQS
+};
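+
+/*
+ * Note: each request/response pair below aliases the same message buffer.
+ * The back-end handlers in pipeline_routing_be.c cast the incoming message
+ * to the request type, overwrite it in place with the response fields and
+ * return the same pointer.
+ */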
+
+/*
+ * MSG ROUTE ADD
+ */
+struct pipeline_routing_route_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_route_key key;
+
+ /* data */
+ struct pipeline_routing_route_data data;
+};
+
+struct pipeline_routing_route_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ROUTE DELETE
+ */
+struct pipeline_routing_route_delete_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_route_key key;
+};
+
+struct pipeline_routing_route_delete_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ROUTE ADD DEFAULT
+ */
+struct pipeline_routing_route_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* data */
+ uint32_t port_id;
+};
+
+struct pipeline_routing_route_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ROUTE DELETE DEFAULT
+ */
+struct pipeline_routing_route_delete_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+};
+
+struct pipeline_routing_route_delete_default_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG ARP ADD
+ */
+struct pipeline_routing_arp_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_arp_key key;
+
+ /* data */
+ struct ether_addr macaddr;
+};
+
+struct pipeline_routing_arp_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ARP DELETE
+ */
+struct pipeline_routing_arp_delete_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_arp_key key;
+};
+
+struct pipeline_routing_arp_delete_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ARP ADD DEFAULT
+ */
+struct pipeline_routing_arp_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* data */
+ uint32_t port_id;
+};
+
+struct pipeline_routing_arp_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ARP DELETE DEFAULT
+ */
+struct pipeline_routing_arp_delete_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+};
+
+struct pipeline_routing_arp_delete_default_msg_rsp {
+ int status;
+};
+
+extern struct pipeline_be_ops pipeline_routing_be_ops;
+
+#endif
diff --git a/examples/ip_pipeline/pipeline_be.h b/examples/ip_pipeline/pipeline_be.h
new file mode 100644
index 00000000..f4ff262e
--- /dev/null
+++ b/examples/ip_pipeline/pipeline_be.h
@@ -0,0 +1,305 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_BE_H__
+#define __INCLUDE_PIPELINE_BE_H__
+
+#include <rte_port_ethdev.h>
+#include <rte_port_ring.h>
+#include <rte_port_frag.h>
+#include <rte_port_ras.h>
+#include <rte_port_sched.h>
+#include <rte_port_source_sink.h>
+#include <rte_pipeline.h>
+
+enum pipeline_port_in_type {
+ PIPELINE_PORT_IN_ETHDEV_READER,
+ PIPELINE_PORT_IN_RING_READER,
+ PIPELINE_PORT_IN_RING_MULTI_READER,
+ PIPELINE_PORT_IN_RING_READER_IPV4_FRAG,
+ PIPELINE_PORT_IN_RING_READER_IPV6_FRAG,
+ PIPELINE_PORT_IN_SCHED_READER,
+ PIPELINE_PORT_IN_SOURCE,
+};
+
+struct pipeline_port_in_params {
+ enum pipeline_port_in_type type;
+ union {
+ struct rte_port_ethdev_reader_params ethdev;
+ struct rte_port_ring_reader_params ring;
+ struct rte_port_ring_multi_reader_params ring_multi;
+ struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag;
+ struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag;
+ struct rte_port_sched_reader_params sched;
+ struct rte_port_source_params source;
+ } params;
+ uint32_t burst_size;
+};
+
+static inline void *
+pipeline_port_in_params_convert(struct pipeline_port_in_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_IN_ETHDEV_READER:
+ return (void *) &p->params.ethdev;
+ case PIPELINE_PORT_IN_RING_READER:
+ return (void *) &p->params.ring;
+ case PIPELINE_PORT_IN_RING_MULTI_READER:
+ return (void *) &p->params.ring_multi;
+ case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
+ return (void *) &p->params.ring_ipv4_frag;
+ case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
+ return (void *) &p->params.ring_ipv6_frag;
+ case PIPELINE_PORT_IN_SCHED_READER:
+ return (void *) &p->params.sched;
+ case PIPELINE_PORT_IN_SOURCE:
+ return (void *) &p->params.source;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct rte_port_in_ops *
+pipeline_port_in_params_get_ops(struct pipeline_port_in_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_IN_ETHDEV_READER:
+ return &rte_port_ethdev_reader_ops;
+ case PIPELINE_PORT_IN_RING_READER:
+ return &rte_port_ring_reader_ops;
+ case PIPELINE_PORT_IN_RING_MULTI_READER:
+ return &rte_port_ring_multi_reader_ops;
+ case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
+ return &rte_port_ring_reader_ipv4_frag_ops;
+ case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
+ return &rte_port_ring_reader_ipv6_frag_ops;
+ case PIPELINE_PORT_IN_SCHED_READER:
+ return &rte_port_sched_reader_ops;
+ case PIPELINE_PORT_IN_SOURCE:
+ return &rte_port_source_ops;
+ default:
+ return NULL;
+ }
+}
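+
+/*
+ * Illustrative sketch only (field and variable names follow the
+ * librte_pipeline API, not this file): these two helpers let the init code
+ * create any input port type uniformly, roughly as:
+ *
+ *	struct rte_pipeline_port_in_params port_params = {
+ *		.ops = pipeline_port_in_params_get_ops(p),
+ *		.arg_create = pipeline_port_in_params_convert(p),
+ *		.f_action = NULL,
+ *		.arg_ah = NULL,
+ *		.burst_size = p->burst_size,
+ *	};
+ *
+ *	rte_pipeline_port_in_create(pipeline, &port_params, &port_id);
+ */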
+
+enum pipeline_port_out_type {
+ PIPELINE_PORT_OUT_ETHDEV_WRITER,
+ PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_WRITER,
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER,
+ PIPELINE_PORT_OUT_RING_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS,
+ PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS,
+ PIPELINE_PORT_OUT_SCHED_WRITER,
+ PIPELINE_PORT_OUT_SINK,
+};
+
+struct pipeline_port_out_params {
+ enum pipeline_port_out_type type;
+ union {
+ struct rte_port_ethdev_writer_params ethdev;
+ struct rte_port_ethdev_writer_nodrop_params ethdev_nodrop;
+ struct rte_port_ring_writer_params ring;
+ struct rte_port_ring_multi_writer_params ring_multi;
+ struct rte_port_ring_writer_nodrop_params ring_nodrop;
+ struct rte_port_ring_multi_writer_nodrop_params ring_multi_nodrop;
+ struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras;
+ struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras;
+ struct rte_port_sched_writer_params sched;
+ struct rte_port_sink_params sink;
+ } params;
+};
+
+static inline void *
+pipeline_port_out_params_convert(struct pipeline_port_out_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER:
+ return (void *) &p->params.ethdev;
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
+ return (void *) &p->params.ethdev_nodrop;
+ case PIPELINE_PORT_OUT_RING_WRITER:
+ return (void *) &p->params.ring;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
+ return (void *) &p->params.ring_multi;
+ case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
+ return (void *) &p->params.ring_nodrop;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
+ return (void *) &p->params.ring_multi_nodrop;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
+ return (void *) &p->params.ring_ipv4_ras;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
+ return (void *) &p->params.ring_ipv6_ras;
+ case PIPELINE_PORT_OUT_SCHED_WRITER:
+ return (void *) &p->params.sched;
+ case PIPELINE_PORT_OUT_SINK:
+ return (void *) &p->params.sink;
+ default:
+ return NULL;
+ }
+}
+
+static inline void *
+pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER:
+ return &rte_port_ethdev_writer_ops;
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
+ return &rte_port_ethdev_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER:
+ return &rte_port_ring_writer_ops;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
+ return &rte_port_ring_multi_writer_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
+ return &rte_port_ring_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
+ return &rte_port_ring_multi_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
+ return &rte_port_ring_writer_ipv4_ras_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
+ return &rte_port_ring_writer_ipv6_ras_ops;
+ case PIPELINE_PORT_OUT_SCHED_WRITER:
+ return &rte_port_sched_writer_ops;
+ case PIPELINE_PORT_OUT_SINK:
+ return &rte_port_sink_ops;
+ default:
+ return NULL;
+ }
+}
+
+#ifndef PIPELINE_NAME_SIZE
+#define PIPELINE_NAME_SIZE 32
+#endif
+
+#ifndef PIPELINE_MAX_PORT_IN
+#define PIPELINE_MAX_PORT_IN 16
+#endif
+
+#ifndef PIPELINE_MAX_PORT_OUT
+#define PIPELINE_MAX_PORT_OUT 16
+#endif
+
+#ifndef PIPELINE_MAX_TABLES
+#define PIPELINE_MAX_TABLES 16
+#endif
+
+#ifndef PIPELINE_MAX_MSGQ_IN
+#define PIPELINE_MAX_MSGQ_IN 16
+#endif
+
+#ifndef PIPELINE_MAX_MSGQ_OUT
+#define PIPELINE_MAX_MSGQ_OUT 16
+#endif
+
+#ifndef PIPELINE_MAX_ARGS
+#define PIPELINE_MAX_ARGS 32
+#endif
+
+struct pipeline_params {
+ char name[PIPELINE_NAME_SIZE];
+
+ struct pipeline_port_in_params port_in[PIPELINE_MAX_PORT_IN];
+ struct pipeline_port_out_params port_out[PIPELINE_MAX_PORT_OUT];
+ struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
+ struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
+
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_msgq;
+
+ int socket_id;
+
+ char *args_name[PIPELINE_MAX_ARGS];
+ char *args_value[PIPELINE_MAX_ARGS];
+ uint32_t n_args;
+
+ uint32_t log_level;
+};
+
+/*
+ * Pipeline type back-end operations
+ */
+
+typedef void* (*pipeline_be_op_init)(struct pipeline_params *params,
+ void *arg);
+
+typedef int (*pipeline_be_op_free)(void *pipeline);
+
+typedef int (*pipeline_be_op_run)(void *pipeline);
+
+typedef int (*pipeline_be_op_timer)(void *pipeline);
+
+typedef int (*pipeline_be_op_track)(void *pipeline,
+ uint32_t port_in,
+ uint32_t *port_out);
+
+struct pipeline_be_ops {
+ pipeline_be_op_init f_init;
+ pipeline_be_op_free f_free;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ pipeline_be_op_track f_track;
+};
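+
+/*
+ * Note: f_run may be NULL (see pipeline_routing_be_ops), in which case the
+ * worker thread drives the pipeline through the regular rte_pipeline_run()
+ * path instead of a custom run function (see thread.c).
+ */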
+
+/* Pipeline specific config parse error messages */
+#define PIPELINE_ARG_CHECK(exp, fmt, ...) \
+do { \
+ if (!(exp)) { \
+ fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
+ return -1; \
+ } \
+} while (0)
+
+#define PIPELINE_PARSE_ERR_INV_VAL(exp, section, entry, val) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
+ "has invalid value (\"%s\")", section, entry, val)
+
+#define PIPELINE_PARSE_ERR_OUT_RNG(exp, section, entry, val) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
+ "value is out of range (\"%s\")", section, entry, val)
+
+#define PIPELINE_PARSE_ERR_DUPLICATE(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": duplicated " \
+ "entry \"%s\"", section, entry)
+
+#define PIPELINE_PARSE_ERR_INV_ENT(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": invalid entry " \
+ "\"%s\"", section, entry)
+
+#define PIPELINE_PARSE_ERR_MANDATORY(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": mandatory " \
+ "entry \"%s\" is missing", section, entry)
+
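+/*
+ * Illustrative use only (variable names are hypothetical, not part of this
+ * file): a pipeline's argument parser typically applies these checks per
+ * configuration entry, e.g.:
+ *
+ *	if (strcmp(arg_name, "n_routes") == 0) {
+ *		int status;
+ *
+ *		PIPELINE_PARSE_ERR_DUPLICATE(!n_routes_present,
+ *			params->name, arg_name);
+ *		n_routes_present = 1;
+ *
+ *		status = sscanf(arg_value, "%" SCNu32, &p->n_routes);
+ *		PIPELINE_PARSE_ERR_INV_VAL((status == 1),
+ *			params->name, arg_name, arg_value);
+ *	}
+ *
+ * Each macro prints the error message and makes the enclosing function
+ * return -1 when the checked expression evaluates to false.
+ */
+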
+#endif
diff --git a/examples/ip_pipeline/thread.c b/examples/ip_pipeline/thread.c
new file mode 100644
index 00000000..a0f1f12f
--- /dev/null
+++ b/examples/ip_pipeline/thread.c
@@ -0,0 +1,322 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_common_be.h"
+#include "app.h"
+#include "thread.h"
+
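+/*
+ * When headroom statistics collection is enabled, pipeline run iterations
+ * that return zero packets are counted as headroom (idle) cycles;
+ * thread_headroom_update() periodically converts the accumulated cycle
+ * count into a ratio of idle time over the measurement interval.
+ */
+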
+#if APP_THREAD_HEADROOM_STATS_COLLECT
+
+#define PIPELINE_RUN_REGULAR(thread, pipeline) \
+do { \
+ uint64_t t0 = rte_rdtsc_precise(); \
+ int n_pkts = rte_pipeline_run(pipeline->p); \
+ \
+ if (n_pkts == 0) { \
+ uint64_t t1 = rte_rdtsc_precise(); \
+ \
+ thread->headroom_cycles += t1 - t0; \
+ } \
+} while (0)
+
+
+#define PIPELINE_RUN_CUSTOM(thread, data) \
+do { \
+ uint64_t t0 = rte_rdtsc_precise(); \
+ int n_pkts = data->f_run(data->be); \
+ \
+ if (n_pkts == 0) { \
+ uint64_t t1 = rte_rdtsc_precise(); \
+ \
+ thread->headroom_cycles += t1 - t0; \
+ } \
+} while (0)
+
+#else
+
+#define PIPELINE_RUN_REGULAR(thread, pipeline) \
+ rte_pipeline_run(pipeline->p)
+
+#define PIPELINE_RUN_CUSTOM(thread, data) \
+ data->f_run(data->be)
+
+#endif
+
+static inline void *
+thread_msg_recv(struct rte_ring *r)
+{
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void
+thread_msg_send(struct rte_ring *r,
+ void *msg)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+static int
+thread_pipeline_enable(struct app_thread_data *t,
+ struct thread_pipeline_enable_msg_req *req)
+{
+ struct app_thread_pipeline_data *p;
+
+ if (req->f_run == NULL) {
+ if (t->n_regular >= APP_MAX_THREAD_PIPELINES)
+ return -1;
+ } else {
+ if (t->n_custom >= APP_MAX_THREAD_PIPELINES)
+ return -1;
+ }
+
+ p = (req->f_run == NULL) ?
+ &t->regular[t->n_regular] :
+ &t->custom[t->n_custom];
+
+ p->pipeline_id = req->pipeline_id;
+ p->be = req->be;
+ p->f_run = req->f_run;
+ p->f_timer = req->f_timer;
+ p->timer_period = req->timer_period;
+ p->deadline = 0;
+
+ if (req->f_run == NULL)
+ t->n_regular++;
+ else
+ t->n_custom++;
+
+ return 0;
+}
+
+static int
+thread_pipeline_disable(struct app_thread_data *t,
+ struct thread_pipeline_disable_msg_req *req)
+{
+ uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
+ uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
+ uint32_t i;
+
+ /* search regular pipelines of current thread */
+ for (i = 0; i < n_regular; i++) {
+ if (t->regular[i].pipeline_id != req->pipeline_id)
+ continue;
+
+		/* source and destination regions overlap, so use memmove() */
+		if (i < n_regular - 1)
+			memmove(&t->regular[i],
+				&t->regular[i+1],
+				(n_regular - 1 - i) * sizeof(struct app_thread_pipeline_data));
+
+ n_regular--;
+ t->n_regular = n_regular;
+
+ return 0;
+ }
+
+ /* search custom pipelines of current thread */
+ for (i = 0; i < n_custom; i++) {
+ if (t->custom[i].pipeline_id != req->pipeline_id)
+ continue;
+
+		/* source and destination regions overlap, so use memmove() */
+		if (i < n_custom - 1)
+			memmove(&t->custom[i],
+				&t->custom[i+1],
+				(n_custom - 1 - i) * sizeof(struct app_thread_pipeline_data));
+
+ n_custom--;
+ t->n_custom = n_custom;
+
+ return 0;
+ }
+
+ /* return if pipeline not found */
+ return -1;
+}
+
+static int
+thread_msg_req_handle(struct app_thread_data *t)
+{
+ void *msg_ptr;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+
+ msg_ptr = thread_msg_recv(t->msgq_in);
+ req = msg_ptr;
+ rsp = msg_ptr;
+
+ if (req != NULL)
+ switch (req->type) {
+ case THREAD_MSG_REQ_PIPELINE_ENABLE: {
+ rsp->status = thread_pipeline_enable(t,
+ (struct thread_pipeline_enable_msg_req *) req);
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+
+ case THREAD_MSG_REQ_PIPELINE_DISABLE: {
+ rsp->status = thread_pipeline_disable(t,
+ (struct thread_pipeline_disable_msg_req *) req);
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+
+ case THREAD_MSG_REQ_HEADROOM_READ: {
+ struct thread_headroom_read_msg_rsp *rsp =
+ (struct thread_headroom_read_msg_rsp *)
+ req;
+
+ rsp->headroom_ratio = t->headroom_ratio;
+ rsp->status = 0;
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void
+thread_headroom_update(struct app_thread_data *t, uint64_t time)
+{
+ uint64_t time_diff = time - t->headroom_time;
+
+ t->headroom_ratio =
+ ((double) t->headroom_cycles) / ((double) time_diff);
+
+ t->headroom_cycles = 0;
+ t->headroom_time = rte_rdtsc_precise();
+}
+
+int
+app_thread(void *arg)
+{
+ struct app_params *app = (struct app_params *) arg;
+ uint32_t core_id = rte_lcore_id(), i, j;
+ struct app_thread_data *t = &app->thread_data[core_id];
+
+ for (i = 0; ; i++) {
+ uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
+ uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
+
+ /* Run regular pipelines */
+ for (j = 0; j < n_regular; j++) {
+ struct app_thread_pipeline_data *data = &t->regular[j];
+ struct pipeline *p = data->be;
+
+ PIPELINE_RUN_REGULAR(t, p);
+ }
+
+ /* Run custom pipelines */
+ for (j = 0; j < n_custom; j++) {
+ struct app_thread_pipeline_data *data = &t->custom[j];
+
+ PIPELINE_RUN_CUSTOM(t, data);
+ }
+
+ /* Timer */
+ if ((i & 0xF) == 0) {
+ uint64_t time = rte_get_tsc_cycles();
+ uint64_t t_deadline = UINT64_MAX;
+
+ if (time < t->deadline)
+ continue;
+
+ /* Timer for regular pipelines */
+ for (j = 0; j < n_regular; j++) {
+ struct app_thread_pipeline_data *data =
+ &t->regular[j];
+ uint64_t p_deadline = data->deadline;
+
+ if (p_deadline <= time) {
+ data->f_timer(data->be);
+ p_deadline = time + data->timer_period;
+ data->deadline = p_deadline;
+ }
+
+ if (p_deadline < t_deadline)
+ t_deadline = p_deadline;
+ }
+
+ /* Timer for custom pipelines */
+ for (j = 0; j < n_custom; j++) {
+ struct app_thread_pipeline_data *data =
+ &t->custom[j];
+ uint64_t p_deadline = data->deadline;
+
+ if (p_deadline <= time) {
+ data->f_timer(data->be);
+ p_deadline = time + data->timer_period;
+ data->deadline = p_deadline;
+ }
+
+ if (p_deadline < t_deadline)
+ t_deadline = p_deadline;
+ }
+
+ /* Timer for thread message request */
+ {
+ uint64_t deadline = t->thread_req_deadline;
+
+ if (deadline <= time) {
+ thread_msg_req_handle(t);
+ thread_headroom_update(t, time);
+ deadline = time + t->timer_period;
+ t->thread_req_deadline = deadline;
+ }
+
+ if (deadline < t_deadline)
+ t_deadline = deadline;
+ }
+
+ t->deadline = t_deadline;
+ }
+ }
+
+ return 0;
+}
diff --git a/examples/ip_pipeline/thread.h b/examples/ip_pipeline/thread.h
new file mode 100644
index 00000000..e52b22e6
--- /dev/null
+++ b/examples/ip_pipeline/thread.h
@@ -0,0 +1,98 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_H_
+#define THREAD_H_
+
+#include "app.h"
+#include "pipeline_be.h"
+
+enum thread_msg_req_type {
+ THREAD_MSG_REQ_PIPELINE_ENABLE = 0,
+ THREAD_MSG_REQ_PIPELINE_DISABLE,
+ THREAD_MSG_REQ_HEADROOM_READ,
+ THREAD_MSG_REQS
+};
+
+struct thread_msg_req {
+ enum thread_msg_req_type type;
+};
+
+struct thread_msg_rsp {
+ int status;
+};
+
+/*
+ * PIPELINE ENABLE
+ */
+struct thread_pipeline_enable_msg_req {
+ enum thread_msg_req_type type;
+
+ uint32_t pipeline_id;
+ void *be;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ uint64_t timer_period;
+};
+
+struct thread_pipeline_enable_msg_rsp {
+ int status;
+};
+
+/*
+ * PIPELINE DISABLE
+ */
+struct thread_pipeline_disable_msg_req {
+ enum thread_msg_req_type type;
+
+ uint32_t pipeline_id;
+};
+
+struct thread_pipeline_disable_msg_rsp {
+ int status;
+};
+
+/*
+ * THREAD HEADROOM
+ */
+struct thread_headroom_read_msg_req {
+ enum thread_msg_req_type type;
+};
+
+struct thread_headroom_read_msg_rsp {
+ int status;
+
+ double headroom_ratio;
+};
+
+#endif /* THREAD_H_ */
diff --git a/examples/ip_pipeline/thread_fe.c b/examples/ip_pipeline/thread_fe.c
new file mode 100644
index 00000000..4a435f7c
--- /dev/null
+++ b/examples/ip_pipeline/thread_fe.c
@@ -0,0 +1,461 @@
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "thread.h"
+#include "thread_fe.h"
+#include "pipeline.h"
+#include "pipeline_common_fe.h"
+#include "app.h"
+
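+/*
+ * Send a request on the target thread's input message queue and busy-wait
+ * for the response on its output queue, giving up after timeout_ms
+ * milliseconds (0 means wait forever). The thread reuses the request
+ * buffer for its response, so the returned pointer must be released by the
+ * caller with app_msg_free().
+ */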
+static inline void *
+thread_msg_send_recv(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id,
+ void *msg,
+ uint32_t timeout_ms)
+{
+ struct rte_ring *r_req = app_thread_msgq_in_get(app,
+ socket_id, core_id, ht_id);
+ struct rte_ring *r_rsp = app_thread_msgq_out_get(app,
+ socket_id, core_id, ht_id);
+ uint64_t hz = rte_get_tsc_hz();
+ void *msg_recv;
+ uint64_t deadline;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(r_req, (void *) msg);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ deadline = (timeout_ms) ?
+ (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
+ UINT64_MAX;
+
+ do {
+ if (rte_rdtsc() > deadline)
+ return NULL;
+
+ status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
+ } while (status != 0);
+
+ return msg_recv;
+}
+
+int
+app_pipeline_enable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id)
+{
+ struct thread_pipeline_enable_msg_req *req;
+ struct thread_pipeline_enable_msg_rsp *rsp;
+ int thread_id;
+ struct app_pipeline_data *p;
+ struct app_pipeline_params *p_params;
+ struct pipeline_type *p_type;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) ||
+ ((app->core_mask & (1LLU << thread_id)) == 0))
+ return -1;
+
+ if (app_pipeline_data(app, pipeline_id) == NULL)
+ return -1;
+
+ p = &app->pipeline_data[pipeline_id];
+ p_params = &app->pipeline_params[pipeline_id];
+ p_type = app_pipeline_type_find(app, p_params->type);
+
+ if (p->enabled == 1)
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_PIPELINE_ENABLE;
+ req->pipeline_id = pipeline_id;
+ req->be = p->be;
+ req->f_run = p_type->be_ops->f_run;
+ req->f_timer = p_type->be_ops->f_timer;
+ req->timer_period = p->timer_period;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ status = rsp->status;
+ app_msg_free(app, rsp);
+
+ if (status != 0)
+ return -1;
+
+ p->enabled = 1;
+ return 0;
+}
+
+int
+app_pipeline_disable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id)
+{
+ struct thread_pipeline_disable_msg_req *req;
+ struct thread_pipeline_disable_msg_rsp *rsp;
+ int thread_id;
+ struct app_pipeline_data *p;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) ||
+ ((app->core_mask & (1LLU << thread_id)) == 0))
+ return -1;
+
+ if (app_pipeline_data(app, pipeline_id) == NULL)
+ return -1;
+
+ p = &app->pipeline_data[pipeline_id];
+
+ if (p->enabled == 0)
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_PIPELINE_DISABLE;
+ req->pipeline_id = pipeline_id;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+
+ if (rsp == NULL)
+ return -1;
+
+ status = rsp->status;
+ app_msg_free(app, rsp);
+
+ if (status != 0)
+ return -1;
+
+ p->enabled = 0;
+ return 0;
+}
+
+int
+app_thread_headroom(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id)
+{
+ struct thread_headroom_read_msg_req *req;
+ struct thread_headroom_read_msg_rsp *rsp;
+ int thread_id;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) ||
+ ((app->core_mask & (1LLU << thread_id)) == 0))
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_HEADROOM_READ;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+
+ if (rsp == NULL)
+ return -1;
+
+	status = rsp->status;
+
+	if (status != 0) {
+		app_msg_free(app, rsp);
+		return -1;
+	}
+
+	printf("%.3f%%\n", rsp->headroom_ratio * 100);
+
+	app_msg_free(app, rsp);
+
+	return 0;
+}
+
+/*
+ * pipeline enable
+ */
+
+struct cmd_pipeline_enable_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t pipeline_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t enable_string;
+};
+
+static void
+cmd_pipeline_enable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_pipeline_enable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_pipeline_enable(app,
+ socket_id,
+ core_id,
+ hyper_th_id,
+ params->pipeline_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_string, "t");
+
+cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_id_string,
+ NULL);
+
+cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_string,
+ "pipeline");
+
+cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, enable_string,
+ "enable");
+
+cmdline_parse_inst_t cmd_pipeline_enable = {
+ .f = cmd_pipeline_enable_parsed,
+ .data = NULL,
+ .help_str = "Enable pipeline on specified core",
+ .tokens = {
+ (void *)&cmd_pipeline_enable_t_string,
+ (void *)&cmd_pipeline_enable_t_id_string,
+ (void *)&cmd_pipeline_enable_pipeline_string,
+ (void *)&cmd_pipeline_enable_pipeline_id,
+ (void *)&cmd_pipeline_enable_enable_string,
+ NULL,
+ },
+};
+
+/*
+ * pipeline disable
+ */
+
+struct cmd_pipeline_disable_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t pipeline_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t disable_string;
+};
+
+static void
+cmd_pipeline_disable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_pipeline_disable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_pipeline_disable(app,
+ socket_id,
+ core_id,
+ hyper_th_id,
+ params->pipeline_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_string, "t");
+
+cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_id_string,
+ NULL);
+
+cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result,
+ pipeline_string, "pipeline");
+
+cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipeline_disable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, disable_string,
+ "disable");
+
+cmdline_parse_inst_t cmd_pipeline_disable = {
+ .f = cmd_pipeline_disable_parsed,
+ .data = NULL,
+ .help_str = "Disable pipeline on specified core",
+ .tokens = {
+ (void *)&cmd_pipeline_disable_t_string,
+ (void *)&cmd_pipeline_disable_t_id_string,
+ (void *)&cmd_pipeline_disable_pipeline_string,
+ (void *)&cmd_pipeline_disable_pipeline_id,
+ (void *)&cmd_pipeline_disable_disable_string,
+ NULL,
+ },
+};
+
+
+/*
+ * thread headroom
+ */
+
+struct cmd_thread_headroom_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t headroom_string;
+};
+
+static void
+cmd_thread_headroom_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_thread_headroom_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_thread_headroom(app,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_thread_headroom_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ t_string, "t");
+
+cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ t_id_string, NULL);
+
+cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ headroom_string, "headroom");
+
+cmdline_parse_inst_t cmd_thread_headroom = {
+ .f = cmd_thread_headroom_parsed,
+ .data = NULL,
+ .help_str = "Display thread headroom",
+ .tokens = {
+ (void *)&cmd_thread_headroom_t_string,
+ (void *)&cmd_thread_headroom_t_id_string,
+ (void *)&cmd_thread_headroom_headroom_string,
+ NULL,
+ },
+};
+
+
+static cmdline_parse_ctx_t thread_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_pipeline_enable,
+ (cmdline_parse_inst_t *) &cmd_pipeline_disable,
+ (cmdline_parse_inst_t *) &cmd_thread_headroom,
+ NULL,
+};
+
+int
+app_pipeline_thread_cmd_push(struct app_params *app)
+{
+ uint32_t n_cmds, i;
+
+ /* Check for available slots in the application commands array */
+ n_cmds = RTE_DIM(thread_cmds) - 1;
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push thread commands into the application */
+ memcpy(&app->cmds[app->n_cmds], thread_cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
diff --git a/examples/ip_pipeline/thread_fe.h b/examples/ip_pipeline/thread_fe.h
new file mode 100644
index 00000000..2fd4ee8e
--- /dev/null
+++ b/examples/ip_pipeline/thread_fe.h
@@ -0,0 +1,101 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_FE_H_
+#define THREAD_FE_H_
+
+static inline struct rte_ring *
+app_thread_msgq_in_get(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
+{
+ char msgq_name[32];
+ ssize_t param_idx;
+
+ snprintf(msgq_name, sizeof(msgq_name),
+ "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ socket_id,
+ core_id,
+ (ht_id) ? "h" : "");
+ param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
+
+ if (param_idx < 0)
+ return NULL;
+
+ return app->msgq[param_idx];
+}
+
+static inline struct rte_ring *
+app_thread_msgq_out_get(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
+{
+ char msgq_name[32];
+ ssize_t param_idx;
+
+ snprintf(msgq_name, sizeof(msgq_name),
+ "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ socket_id,
+ core_id,
+ (ht_id) ? "h" : "");
+ param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
+
+ if (param_idx < 0)
+ return NULL;
+
+ return app->msgq[param_idx];
+
+}
+
+int
+app_pipeline_thread_cmd_push(struct app_params *app);
+
+int
+app_pipeline_enable(struct app_params *app,
+	uint32_t socket_id,
+	uint32_t core_id,
+	uint32_t hyper_th_id,
+	uint32_t pipeline_id);
+
+int
+app_pipeline_disable(struct app_params *app,
+	uint32_t socket_id,
+	uint32_t core_id,
+	uint32_t hyper_th_id,
+	uint32_t pipeline_id);
+
+int
+app_thread_headroom(struct app_params *app,
+	uint32_t socket_id,
+	uint32_t core_id,
+	uint32_t hyper_th_id);
+
+#endif /* THREAD_FE_H_ */