author	Christian Ehrhardt <christian.ehrhardt@canonical.com>	2016-07-06 09:22:35 +0200
committer	Christian Ehrhardt <christian.ehrhardt@canonical.com>	2016-07-06 16:15:13 +0200
commit	809f08006d56e7ba4ce190b0a63d44acf62d8044 (patch)
tree	d93fbe3244ee0cff16a6af830c7efb15c26e5ef4 /examples
parent	b8ce7c38b99df118002fb460e680fabf16944f6c (diff)
Imported Upstream version 16.07-rc1
Change-Id: If3f757dc95532706b04053286c6b54492169f1a3
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'examples')
-rw-r--r-- examples/Makefile | 3
-rw-r--r-- examples/distributor/main.c | 36
-rw-r--r-- examples/dpdk_qat/main.c | 2
-rw-r--r-- examples/ethtool/ethtool-app/ethapp.c | 1
-rw-r--r-- examples/ethtool/lib/Makefile | 4
-rw-r--r-- examples/ethtool/lib/rte_ethtool.c | 12
-rw-r--r-- examples/exception_path/main.c | 3
-rw-r--r-- examples/ip_fragmentation/main.c | 6
-rw-r--r-- examples/ip_pipeline/Makefile | 1
-rw-r--r-- examples/ip_pipeline/app.h | 403
-rw-r--r-- examples/ip_pipeline/config/action.cfg | 68
-rw-r--r-- examples/ip_pipeline/config/action.sh | 119
-rw-r--r-- examples/ip_pipeline/config/action.txt | 8
-rw-r--r-- examples/ip_pipeline/config/edge_router_downstream.cfg | 30
-rw-r--r-- examples/ip_pipeline/config/edge_router_downstream.sh | 7
-rw-r--r-- examples/ip_pipeline/config/edge_router_upstream.cfg | 36
-rw-r--r-- examples/ip_pipeline/config/edge_router_upstream.sh | 37
-rw-r--r-- examples/ip_pipeline/config/firewall.cfg | 68
-rw-r--r-- examples/ip_pipeline/config/firewall.sh | 13
-rw-r--r-- examples/ip_pipeline/config/firewall.txt | 9
-rw-r--r-- examples/ip_pipeline/config/flow.cfg | 72
-rw-r--r-- examples/ip_pipeline/config/flow.sh | 25
-rw-r--r-- examples/ip_pipeline/config/flow.txt | 17
-rw-r--r-- examples/ip_pipeline/config/kni.cfg | 67
-rw-r--r-- examples/ip_pipeline/config/l2fwd.cfg | 5
-rw-r--r-- examples/ip_pipeline/config/l3fwd.cfg | 9
-rw-r--r-- examples/ip_pipeline/config/l3fwd.sh | 32
-rw-r--r-- examples/ip_pipeline/config/l3fwd_arp.cfg | 70
-rw-r--r-- examples/ip_pipeline/config/l3fwd_arp.sh | 43
-rw-r--r-- examples/ip_pipeline/config/network_layers.cfg | 223
-rw-r--r-- examples/ip_pipeline/config/network_layers.sh | 79
-rwxr-xr-x examples/ip_pipeline/config/pipeline-to-core-mapping.py | 936
-rw-r--r-- examples/ip_pipeline/config_check.c | 58
-rw-r--r-- examples/ip_pipeline/config_parse.c | 1448
-rw-r--r-- examples/ip_pipeline/init.c | 302
-rw-r--r-- examples/ip_pipeline/parser.c | 745
-rw-r--r-- examples/ip_pipeline/parser.h | 54
-rw-r--r-- examples/ip_pipeline/pipeline.h | 11
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_common_fe.c | 640
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_common_fe.h | 26
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_firewall.c | 1463
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_firewall.h | 12
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_firewall_be.c | 28
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_flow_actions.c | 1507
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_flow_actions.h | 11
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c | 22
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_flow_classification.c | 2084
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_flow_classification.h | 28
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c | 22
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_master.c | 2
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_master_be.c | 22
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_passthrough.c | 27
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_passthrough_be.c | 39
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_routing.c | 1845
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_routing.h | 7
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_routing_be.c | 102
-rw-r--r-- examples/ip_pipeline/pipeline/pipeline_routing_be.h | 16
-rw-r--r-- examples/ip_pipeline/pipeline_be.h | 51
-rw-r--r-- examples/ip_pipeline/thread_fe.c | 39
-rw-r--r-- examples/ip_reassembly/main.c | 6
-rw-r--r-- examples/ipsec-secgw/Makefile | 8
-rw-r--r-- examples/ipsec-secgw/esp.c | 248
-rw-r--r-- examples/ipsec-secgw/esp.h | 9
-rw-r--r-- examples/ipsec-secgw/ipip.h | 149
-rw-r--r-- examples/ipsec-secgw/ipsec-secgw.c | 338
-rw-r--r-- examples/ipsec-secgw/ipsec.c | 60
-rw-r--r-- examples/ipsec-secgw/ipsec.h | 75
-rw-r--r-- examples/ipsec-secgw/rt.c | 229
-rw-r--r-- examples/ipsec-secgw/sa.c | 466
-rw-r--r-- examples/ipsec-secgw/sp4.c (renamed from examples/ipsec-secgw/sp.c) | 171
-rw-r--r-- examples/ipsec-secgw/sp6.c | 448
-rw-r--r-- examples/ipv4_multicast/main.c | 4
-rw-r--r-- examples/kni/main.c | 5
-rw-r--r-- examples/l2fwd-crypto/main.c | 25
-rw-r--r-- examples/l2fwd-ivshmem/host/host.c | 3
-rw-r--r-- examples/l2fwd-jobstats/main.c | 5
-rw-r--r-- examples/l2fwd-keepalive/Makefile | 5
-rw-r--r-- examples/l2fwd-keepalive/ka-agent/Makefile | 49
-rw-r--r-- examples/l2fwd-keepalive/ka-agent/main.c | 150
-rw-r--r-- examples/l2fwd-keepalive/main.c | 25
-rw-r--r-- examples/l2fwd-keepalive/shm.c | 131
-rw-r--r-- examples/l2fwd-keepalive/shm.h | 89
-rw-r--r-- examples/l2fwd/main.c | 26
-rw-r--r-- examples/l3fwd-acl/main.c | 5
-rw-r--r-- examples/l3fwd-power/main.c | 3
-rw-r--r-- examples/l3fwd-vf/main.c | 2
-rw-r--r-- examples/l3fwd/l3fwd_em.c | 2
-rw-r--r-- examples/l3fwd/l3fwd_em_hlm_sse.h | 4
-rw-r--r-- examples/l3fwd/main.c | 2
-rw-r--r-- examples/link_status_interrupt/main.c | 3
-rw-r--r-- examples/multi_process/l2fwd_fork/main.c | 6
-rw-r--r-- examples/netmap_compat/lib/compat_netmap.c | 3
-rw-r--r-- examples/packet_ordering/main.c | 18
-rw-r--r-- examples/performance-thread/common/lthread.c | 2
-rw-r--r-- examples/performance-thread/common/lthread_int.h | 12
-rw-r--r-- examples/performance-thread/common/lthread_mutex.c | 3
-rw-r--r-- examples/performance-thread/common/lthread_pool.h | 4
-rw-r--r-- examples/performance-thread/common/lthread_queue.h | 2
-rw-r--r-- examples/performance-thread/common/lthread_sched.c | 2
-rw-r--r-- examples/performance-thread/common/lthread_tls.c | 4
-rw-r--r-- examples/performance-thread/l3fwd-thread/main.c | 55
-rw-r--r-- examples/qos_meter/main.c | 16
-rw-r--r-- examples/qos_meter/main.h | 2
-rw-r--r-- examples/qos_sched/args.c | 14
-rw-r--r-- examples/qos_sched/main.c | 2
-rw-r--r-- examples/qos_sched/main.h | 5
-rw-r--r-- examples/quota_watermark/qw/init.c | 2
-rw-r--r-- examples/quota_watermark/qwctl/qwctl.c | 2
-rw-r--r-- examples/tep_termination/main.c | 112
-rw-r--r-- examples/tep_termination/main.h | 13
-rw-r--r-- examples/tep_termination/vxlan_setup.c | 20
-rw-r--r-- examples/tep_termination/vxlan_setup.h | 6
-rw-r--r-- examples/vhost/main.c | 2370
-rw-r--r-- examples/vhost/main.h | 70
-rw-r--r-- examples/vhost_xen/main.c | 66
-rw-r--r-- examples/vhost_xen/main.h | 11
-rw-r--r-- examples/vm_power_manager/channel_manager.c | 1
-rw-r--r-- examples/vmdq/main.c | 2
-rw-r--r-- examples/vmdq_dcb/main.c | 2
119 files changed, 9995 insertions, 8632 deletions
diff --git a/examples/Makefile b/examples/Makefile
index b28b30e7..f650d3ec 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2014 6WIND S.A.
+# Copyright(c) 2016 6WIND S.A.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -64,6 +64,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += l2fwd-crypto
DIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += l2fwd-ivshmem
DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += l2fwd-jobstats
DIRS-y += l2fwd-keepalive
+DIRS-y += l2fwd-keepalive/ka-agent
DIRS-$(CONFIG_RTE_LIBRTE_LPM) += l3fwd
DIRS-$(CONFIG_RTE_LIBRTE_ACL) += l3fwd-acl
ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index c0201a9e..24857f2d 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -52,19 +52,6 @@
#define BURST_SIZE 32
#define RTE_RING_SZ 1024
-/* uncommnet below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) do { \
- RTE_LOG(DEBUG, log_type, fmt, ##args); \
-} while (0)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
#define RTE_LOGTYPE_DISTRAPP RTE_LOGTYPE_USER1
/* mask of enabled ports */
@@ -178,19 +165,25 @@ struct lcore_params {
struct rte_mempool *mem_pool;
};
-static void
+static int
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
const unsigned num_workers = rte_lcore_count() - 2;
unsigned i;
struct rte_mbuf *bufs[num_workers];
- rte_mempool_get_bulk(p, (void *)bufs, num_workers);
+
+ if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
+ printf("line %d: Error getting mbufs from pool\n", __LINE__);
+ return -1;
+ }
for (i = 0; i < num_workers; i++)
bufs[i]->hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
+
+ return 0;
}
static int
@@ -240,7 +233,8 @@ lcore_rx(struct lcore_params *p)
uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
- LOG_DEBUG(DISTRAPP, "%s:Packet loss due to full ring\n", __func__);
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss due to full ring\n", __func__);
while (sent < nb_ret)
rte_pktmbuf_free(bufs[sent++]);
}
@@ -258,7 +252,8 @@ lcore_rx(struct lcore_params *p)
* get packets till quit_signal is actually been
* received and they gracefully shutdown
*/
- quit_workers(d, mem_pool);
+ if (quit_workers(d, mem_pool) != 0)
+ return -1;
/* rx thread should quit at last */
return 0;
}
@@ -271,7 +266,8 @@ flush_one_port(struct output_buffer *outbuf, uint8_t outp)
app_stats.tx.tx_pkts += nb_tx;
if (unlikely(nb_tx < outbuf->count)) {
- LOG_DEBUG(DISTRAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss with tx_burst\n", __func__);
do {
rte_pktmbuf_free(outbuf->mbufs[nb_tx]);
} while (++nb_tx < outbuf->count);
@@ -588,7 +584,9 @@ main(int argc, char *argv[])
}
/* call lcore_main on master core only */
struct lcore_params p = { 0, d, output_ring, mbuf_pool};
- lcore_rx(&p);
+
+ if (lcore_rx(&p) != 0)
+ return -1;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c
index dc68989a..3c6112d7 100644
--- a/examples/dpdk_qat/main.c
+++ b/examples/dpdk_qat/main.c
@@ -661,8 +661,6 @@ main(int argc, char **argv)
return -1;
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_panic("check_port_config failed\n");
diff --git a/examples/ethtool/ethtool-app/ethapp.c b/examples/ethtool/ethtool-app/ethapp.c
index 2ed4796d..38e466c0 100644
--- a/examples/ethtool/ethtool-app/ethapp.c
+++ b/examples/ethtool/ethtool-app/ethapp.c
@@ -535,7 +535,6 @@ static void pcmd_portstats_callback(__rte_unused void *ptr_params,
}
stat = rte_ethtool_net_get_stats64(params->port, &stat_info);
if (stat == 0) {
- /* Most of rte_eth_stats is deprecated.. */
printf("Port %i stats\n", params->port);
printf(" In: %" PRIu64 " (%" PRIu64 " bytes)\n"
" Out: %"PRIu64" (%"PRIu64 " bytes)\n"
diff --git a/examples/ethtool/lib/Makefile b/examples/ethtool/lib/Makefile
index d7ee9555..5b4991e2 100644
--- a/examples/ethtool/lib/Makefile
+++ b/examples/ethtool/lib/Makefile
@@ -54,4 +54,8 @@ SRCS-y := rte_ethtool.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+# internal dependencies
+DEPDIRS-y += lib/librte_eal
+DEPDIRS-y += lib/librte_ether
+
include $(RTE_SDK)/mk/rte.extlib.mk
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 42e05f1f..54391f21 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -51,8 +51,7 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
if (drvinfo == NULL)
return -EINVAL;
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
@@ -120,8 +119,7 @@ rte_ethtool_get_link(uint8_t port_id)
{
struct rte_eth_link link;
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_link_get(port_id, &link);
return link.link_status;
}
@@ -267,8 +265,7 @@ rte_ethtool_net_open(uint8_t port_id)
int
rte_ethtool_net_stop(uint8_t port_id)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_stop(port_id);
return 0;
@@ -277,8 +274,7 @@ rte_ethtool_net_stop(uint8_t port_id)
int
rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (addr == NULL)
return -EINVAL;
rte_eth_macaddr_get(port_id, addr);
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index bec98040..e5eedcc1 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -350,8 +350,7 @@ setup_port_lcore_affinities(void)
}
port_ids[i] = rx_port++;
- }
- else if (output_cores_mask & (1ULL << i)) {
+ } else if (output_cores_mask & (1ULL << (i & 0x3f))) {
/* Skip ports that are not enabled */
while ((ports_mask & (1 << tx_port)) == 0) {
tx_port++;
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 81a49187..2f452648 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -785,7 +785,7 @@ init_mem(void)
RTE_LOG(INFO, IP_FRAG, "Creating LPM6 table on socket %i\n", socket);
snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);
- lpm6 = rte_lpm6_create("IP_FRAG_LPM6", socket, &lpm6_config);
+ lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
if (lpm6 == NULL) {
RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
return -1;
@@ -824,9 +824,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid arguments");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
- else if (nb_ports == 0)
+ if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
nb_lcores = rte_lcore_count();
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 10fe1ba9..58271173 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -50,6 +50,7 @@ INC += $(wildcard *.h) $(wildcard pipeline/*.h)
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += parser.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse_tm.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_check.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
index 55a98417..6a6fdd97 100644
--- a/examples/ip_pipeline/app.h
+++ b/examples/ip_pipeline/app.h
@@ -44,12 +44,24 @@
#include <cmdline_parse.h>
#include <rte_ethdev.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_kni.h>
+#endif
#include "cpu_core_map.h"
#include "pipeline.h"
#define APP_PARAM_NAME_SIZE PIPELINE_NAME_SIZE
#define APP_LINK_PCI_BDF_SIZE 16
+
+#ifndef APP_LINK_MAX_HWQ_IN
+#define APP_LINK_MAX_HWQ_IN 128
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_OUT
+#define APP_LINK_MAX_HWQ_OUT 128
+#endif
+
struct app_mempool_params {
char *name;
uint32_t parsed;
@@ -69,6 +81,12 @@ struct app_link_params {
uint32_t tcp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
uint32_t udp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
uint32_t sctp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t rss_qs[APP_LINK_MAX_HWQ_IN];
+ uint32_t n_rss_qs;
+ uint64_t rss_proto_ipv4;
+ uint64_t rss_proto_ipv6;
+ uint64_t rss_proto_l2;
+ uint32_t promisc;
uint32_t state; /* DOWN = 0, UP = 1 */
uint32_t ip; /* 0 = Invalid */
uint32_t depth; /* Valid only when IP is valid */
@@ -76,7 +94,6 @@ struct app_link_params {
char pci_bdf[APP_LINK_PCI_BDF_SIZE];
struct rte_eth_conf conf;
- uint8_t promisc;
};
struct app_pktq_hwq_in_params {
@@ -118,6 +135,22 @@ struct app_pktq_swq_params {
uint32_t mempool_indirect_id;
};
+struct app_pktq_kni_params {
+ char *name;
+ uint32_t parsed;
+
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t hyper_th_id;
+ uint32_t force_bind;
+
+ uint32_t mempool_id; /* Position in the app->mempool_params */
+ uint32_t burst_read;
+ uint32_t burst_write;
+ uint32_t dropless;
+ uint64_t n_retries;
+};
+
#ifndef APP_FILE_NAME_SIZE
#define APP_FILE_NAME_SIZE 256
#endif
@@ -171,6 +204,7 @@ enum app_pktq_in_type {
APP_PKTQ_IN_HWQ,
APP_PKTQ_IN_SWQ,
APP_PKTQ_IN_TM,
+ APP_PKTQ_IN_KNI,
APP_PKTQ_IN_SOURCE,
};
@@ -183,6 +217,7 @@ enum app_pktq_out_type {
APP_PKTQ_OUT_HWQ,
APP_PKTQ_OUT_SWQ,
APP_PKTQ_OUT_TM,
+ APP_PKTQ_OUT_KNI,
APP_PKTQ_OUT_SINK,
};
@@ -191,9 +226,7 @@ struct app_pktq_out_params {
uint32_t id; /* Position in the appropriate app array */
};
-#ifndef APP_PIPELINE_TYPE_SIZE
-#define APP_PIPELINE_TYPE_SIZE 64
-#endif
+#define APP_PIPELINE_TYPE_SIZE PIPELINE_TYPE_SIZE
#define APP_MAX_PIPELINE_PKTQ_IN PIPELINE_MAX_PORT_IN
#define APP_MAX_PIPELINE_PKTQ_OUT PIPELINE_MAX_PORT_OUT
@@ -229,6 +262,22 @@ struct app_pipeline_params {
uint32_t n_args;
};
+struct app_params;
+
+typedef void (*app_link_op)(struct app_params *app,
+ uint32_t link_id,
+ uint32_t up,
+ void *arg);
+
+#ifndef APP_MAX_PIPELINES
+#define APP_MAX_PIPELINES 64
+#endif
+
+struct app_link_data {
+ app_link_op f_link[APP_MAX_PIPELINES];
+ void *arg[APP_MAX_PIPELINES];
+};
+
struct app_pipeline_data {
void *be;
void *fe;
@@ -247,7 +296,7 @@ struct app_thread_pipeline_data {
};
#ifndef APP_MAX_THREAD_PIPELINES
-#define APP_MAX_THREAD_PIPELINES 16
+#define APP_MAX_THREAD_PIPELINES 64
#endif
#ifndef APP_THREAD_TIMER_PERIOD
@@ -272,7 +321,7 @@ struct app_thread_data {
uint64_t headroom_time;
uint64_t headroom_cycles;
double headroom_ratio;
-};
+} __rte_cache_aligned;
#ifndef APP_MAX_LINKS
#define APP_MAX_LINKS 16
@@ -370,6 +419,8 @@ struct app_eal_params {
/* Support running on Xen dom0 without hugetlbfs */
uint32_t xen_dom0_present;
int xen_dom0;
+
+ uint32_t parsed;
};
#ifndef APP_APPNAME_SIZE
@@ -380,17 +431,9 @@ struct app_eal_params {
#define APP_MAX_MEMPOOLS 8
#endif
-#ifndef APP_LINK_MAX_HWQ_IN
-#define APP_LINK_MAX_HWQ_IN 64
-#endif
+#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
-#ifndef APP_LINK_MAX_HWQ_OUT
-#define APP_LINK_MAX_HWQ_OUT 64
-#endif
-
-#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
-
-#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
+#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
#ifndef APP_MAX_PKTQ_SWQ
#define APP_MAX_PKTQ_SWQ 256
@@ -398,24 +441,22 @@ struct app_eal_params {
#define APP_MAX_PKTQ_TM APP_MAX_LINKS
+#define APP_MAX_PKTQ_KNI APP_MAX_LINKS
+
#ifndef APP_MAX_PKTQ_SOURCE
-#define APP_MAX_PKTQ_SOURCE 16
+#define APP_MAX_PKTQ_SOURCE 64
#endif
#ifndef APP_MAX_PKTQ_SINK
-#define APP_MAX_PKTQ_SINK 16
+#define APP_MAX_PKTQ_SINK 64
#endif
#ifndef APP_MAX_MSGQ
-#define APP_MAX_MSGQ 64
-#endif
-
-#ifndef APP_MAX_PIPELINES
-#define APP_MAX_PIPELINES 64
+#define APP_MAX_MSGQ 256
#endif
#ifndef APP_EAL_ARGC
-#define APP_EAL_ARGC 32
+#define APP_EAL_ARGC 64
#endif
#ifndef APP_MAX_PIPELINE_TYPES
@@ -453,6 +494,7 @@ struct app_params {
struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT];
struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ];
struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM];
+ struct app_pktq_kni_params kni_params[APP_MAX_PKTQ_KNI];
struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE];
struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK];
struct app_msgq_params msgq_params[APP_MAX_MSGQ];
@@ -464,6 +506,7 @@ struct app_params {
uint32_t n_pktq_hwq_out;
uint32_t n_pktq_swq;
uint32_t n_pktq_tm;
+ uint32_t n_pktq_kni;
uint32_t n_pktq_source;
uint32_t n_pktq_sink;
uint32_t n_msgq;
@@ -474,8 +517,12 @@ struct app_params {
struct cpu_core_map *core_map;
uint64_t core_mask;
struct rte_mempool *mempool[APP_MAX_MEMPOOLS];
+ struct app_link_data link_data[APP_MAX_LINKS];
struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
struct rte_sched_port *tm[APP_MAX_PKTQ_TM];
+#ifdef RTE_LIBRTE_KNI
+ struct rte_kni *kni[APP_MAX_PKTQ_KNI];
+#endif /* RTE_LIBRTE_KNI */
struct rte_ring *msgq[APP_MAX_MSGQ];
struct pipeline_type pipeline_type[APP_MAX_PIPELINE_TYPES];
struct app_pipeline_data pipeline_data[APP_MAX_PIPELINES];
@@ -529,28 +576,6 @@ do \
sscanf(obj->name, prefix "%" SCNu32, &id); \
while (0) \
-#define APP_PARAM_ADD(obj_array, obj_name) \
-({ \
- ssize_t obj_idx; \
- const ssize_t obj_count = RTE_DIM(obj_array); \
- \
- obj_idx = APP_PARAM_FIND(obj_array, obj_name); \
- if (obj_idx < 0) { \
- for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
- if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
- break; \
- } \
- \
- if (obj_idx < obj_count) { \
- (obj_array)[obj_idx].name = strdup(obj_name); \
- if ((obj_array)[obj_idx].name == NULL) \
- obj_idx = -EINVAL; \
- } else \
- obj_idx = -ENOMEM; \
- } \
- obj_idx; \
-})
-
#define APP_CHECK(exp, fmt, ...) \
do { \
if (!(exp)) { \
@@ -665,6 +690,41 @@ app_swq_get_readers(struct app_params *app, struct app_pktq_swq_params *swq)
return n_readers;
}
+static inline struct app_pipeline_params *
+app_swq_get_reader(struct app_params *app,
+ struct app_pktq_swq_params *swq,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SWQ) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
static inline uint32_t
app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
{
@@ -690,6 +750,101 @@ app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
return n_readers;
}
+static inline struct app_pipeline_params *
+app_tm_get_reader(struct app_params *app,
+ struct app_pktq_tm_params *tm,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TM) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
+static inline uint32_t
+app_kni_get_readers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_KNI) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_reader(struct app_params *app,
+ struct app_pktq_kni_params *kni,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_KNI) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
static inline uint32_t
app_source_get_readers(struct app_params *app,
struct app_pktq_source_params *source)
@@ -789,6 +944,42 @@ app_swq_get_writers(struct app_params *app, struct app_pktq_swq_params *swq)
return n_writers;
}
+static inline struct app_pipeline_params *
+app_swq_get_writer(struct app_params *app,
+ struct app_pktq_swq_params *swq,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
static inline uint32_t
app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
{
@@ -815,6 +1006,104 @@ app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
return n_writers;
}
+static inline struct app_pipeline_params *
+app_tm_get_writer(struct app_params *app,
+ struct app_pktq_tm_params *tm,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TM) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
+static inline uint32_t
+app_kni_get_writers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_KNI) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_writer(struct app_params *app,
+ struct app_pktq_kni_params *kni,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_KNI) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
static inline uint32_t
app_sink_get_writers(struct app_params *app, struct app_pktq_sink_params *sink)
{
@@ -913,6 +1202,26 @@ app_get_link_for_tm(struct app_params *app, struct app_pktq_tm_params *p_tm)
return &app->link_params[link_param_idx];
}
+static inline struct app_link_params *
+app_get_link_for_kni(struct app_params *app, struct app_pktq_kni_params *p_kni)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ uint32_t link_id;
+ ssize_t link_param_idx;
+
+ sscanf(p_kni->name, "KNI%" PRIu32, &link_id);
+ sprintf(link_name, "LINK%" PRIu32, link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p_kni->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+void app_pipeline_params_get(struct app_params *app,
+ struct app_pipeline_params *p_in,
+ struct pipeline_params *p_out);
+
int app_config_init(struct app_params *app);
int app_config_args(struct app_params *app,
@@ -932,6 +1241,8 @@ int app_config_check(struct app_params *app);
int app_init(struct app_params *app);
+int app_post_init(struct app_params *app);
+
int app_thread(void *arg);
int app_pipeline_type_register(struct app_params *app,
diff --git a/examples/ip_pipeline/config/action.cfg b/examples/ip_pipeline/config/action.cfg
new file mode 100644
index 00000000..994ae94a
--- /dev/null
+++ b/examples/ip_pipeline/config/action.cfg
@@ -0,0 +1,68 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; ________________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Flow |
+; RXQ2.0 --->| Actions |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |________________|
+;
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FLOW_ACTIONS
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+n_flows = 65536
+n_meters_per_flow = 4
+flow_id_offset = 286; ipdaddr
+ip_hdr_offset = 270
+color_offset = 128
diff --git a/examples/ip_pipeline/config/action.sh b/examples/ip_pipeline/config/action.sh
new file mode 100644
index 00000000..2986ae60
--- /dev/null
+++ b/examples/ip_pipeline/config/action.sh
@@ -0,0 +1,119 @@
+#
+# run ./config/action.sh
+#
+
+p 1 action flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 0 g G y Y r R
+p 1 action flow 0 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 1 g G y Y r R
+p 1 action flow 0 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 2 g G y Y r R
+p 1 action flow 0 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 3 g G y Y r R
+p 1 action flow 0 port 0
+
+p 1 action flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 0 g G y Y r R
+p 1 action flow 1 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 1 g G y Y r R
+p 1 action flow 1 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 2 g G y Y r R
+p 1 action flow 1 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 3 g G y Y r R
+p 1 action flow 1 port 1
+
+p 1 action flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 0 g G y Y r R
+p 1 action flow 2 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 1 g G y Y r R
+p 1 action flow 2 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 2 g G y Y r R
+p 1 action flow 2 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 3 g G y Y r R
+p 1 action flow 2 port 2
+
+p 1 action flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 0 g G y Y r R
+p 1 action flow 3 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 1 g G y Y r R
+p 1 action flow 3 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 2 g G y Y r R
+p 1 action flow 3 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 3 g G y Y r R
+p 1 action flow 3 port 3
+
+#p 1 action flow bulk ./config/action.txt
+
+#p 1 action flow ls
+
+p 1 action flow 0 stats
+p 1 action flow 1 stats
+p 1 action flow 2 stats
+p 1 action flow 3 stats
+
+p 1 action dscp 0 class 0 color G
+p 1 action dscp 1 class 1 color G
+p 1 action dscp 2 class 2 color G
+p 1 action dscp 3 class 3 color G
+p 1 action dscp 4 class 0 color G
+p 1 action dscp 5 class 1 color G
+p 1 action dscp 6 class 2 color G
+p 1 action dscp 7 class 3 color G
+p 1 action dscp 8 class 0 color G
+p 1 action dscp 9 class 1 color G
+p 1 action dscp 10 class 2 color G
+p 1 action dscp 11 class 3 color G
+p 1 action dscp 12 class 0 color G
+p 1 action dscp 13 class 1 color G
+p 1 action dscp 14 class 2 color G
+p 1 action dscp 15 class 3 color G
+p 1 action dscp 16 class 0 color G
+p 1 action dscp 17 class 1 color G
+p 1 action dscp 18 class 2 color G
+p 1 action dscp 19 class 3 color G
+p 1 action dscp 20 class 0 color G
+p 1 action dscp 21 class 1 color G
+p 1 action dscp 22 class 2 color G
+p 1 action dscp 23 class 3 color G
+p 1 action dscp 24 class 0 color G
+p 1 action dscp 25 class 1 color G
+p 1 action dscp 26 class 2 color G
+p 1 action dscp 27 class 3 color G
+p 1 action dscp 27 class 0 color G
+p 1 action dscp 29 class 1 color G
+p 1 action dscp 30 class 2 color G
+p 1 action dscp 31 class 3 color G
+p 1 action dscp 32 class 0 color G
+p 1 action dscp 33 class 1 color G
+p 1 action dscp 34 class 2 color G
+p 1 action dscp 35 class 3 color G
+p 1 action dscp 36 class 0 color G
+p 1 action dscp 37 class 1 color G
+p 1 action dscp 38 class 2 color G
+p 1 action dscp 39 class 3 color G
+p 1 action dscp 40 class 0 color G
+p 1 action dscp 41 class 1 color G
+p 1 action dscp 42 class 2 color G
+p 1 action dscp 43 class 3 color G
+p 1 action dscp 44 class 0 color G
+p 1 action dscp 45 class 1 color G
+p 1 action dscp 46 class 2 color G
+p 1 action dscp 47 class 3 color G
+p 1 action dscp 48 class 0 color G
+p 1 action dscp 49 class 1 color G
+p 1 action dscp 50 class 2 color G
+p 1 action dscp 51 class 3 color G
+p 1 action dscp 52 class 0 color G
+p 1 action dscp 53 class 1 color G
+p 1 action dscp 54 class 2 color G
+p 1 action dscp 55 class 3 color G
+p 1 action dscp 56 class 0 color G
+p 1 action dscp 57 class 1 color G
+p 1 action dscp 58 class 2 color G
+p 1 action dscp 59 class 3 color G
+p 1 action dscp 60 class 0 color G
+p 1 action dscp 61 class 1 color G
+p 1 action dscp 62 class 2 color G
+p 1 action dscp 63 class 3 color G
+
+p 1 action dscp ls
diff --git a/examples/ip_pipeline/config/action.txt b/examples/ip_pipeline/config/action.txt
new file mode 100644
index 00000000..f14207b9
--- /dev/null
+++ b/examples/ip_pipeline/config/action.txt
@@ -0,0 +1,8 @@
+#
+# p <pipelineid> action flow bulk ./config/action.txt
+#
+
+flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 0
+flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 1
+flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 2
+flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 3
diff --git a/examples/ip_pipeline/config/edge_router_downstream.cfg b/examples/ip_pipeline/config/edge_router_downstream.cfg
index 85bbab8f..c6b4e1f2 100644
--- a/examples/ip_pipeline/config/edge_router_downstream.cfg
+++ b/examples/ip_pipeline/config/edge_router_downstream.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -36,9 +36,9 @@
; network) contains the following functional blocks: Packet RX & Routing,
; Traffic management and Packet TX. The input packets are assumed to be
; IPv4, while the output packets are Q-in-Q IPv4.
-
+;
; A simple implementation for this functional pipeline is presented below.
-
+;
; Packet Rx & Traffic Management Packet Tx
; Routing (Pass-Through) (Pass-Through)
; _____________________ SWQ0 ______________________ SWQ4 _____________________
@@ -50,11 +50,23 @@
; | | SWQ3 | | SWQ7 | |
; RXQ3.0 --->| |----->| |----->| |---> TXQ3.0
; |_____________________| |______________________| |_____________________|
-; | _|_ ^ _|_ ^ _|_ ^ _|_ ^
-; | |___|||___|||___|||___||
-; +--> SINK0 |___|||___|||___|||___||
-; (route miss) |__| |__| |__| |__|
-; TM0 TM1 TM2 TM3
+; | | ^ | ^ | ^ | ^
+; | |__| |__| |__| |__|
+; +--> SINK0 TM0 TM1 TM2 TM3
+; (Default)
+;
+; Input packet: Ethernet/IPv4
+; Output packet: Ethernet/QinQ/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
[PIPELINE0]
type = MASTER
@@ -67,7 +79,7 @@ pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
encap = ethernet_qinq
qinq_sched = test
-ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+ip_hdr_offset = 270
[PIPELINE2]
type = PASS-THROUGH
diff --git a/examples/ip_pipeline/config/edge_router_downstream.sh b/examples/ip_pipeline/config/edge_router_downstream.sh
index ce46beb5..67c3a0d1 100644
--- a/examples/ip_pipeline/config/edge_router_downstream.sh
+++ b/examples/ip_pipeline/config/edge_router_downstream.sh
@@ -1,3 +1,7 @@
+#
+# run ./config/edge_router_downstream.sh
+#
+
################################################################################
# Routing: Ether QinQ, ARP off
################################################################################
@@ -6,5 +10,4 @@ p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 256 257
p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 258 259
p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 260 261
p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 262 263
-
-p 1 route ls
+#p 1 route ls
diff --git a/examples/ip_pipeline/config/edge_router_upstream.cfg b/examples/ip_pipeline/config/edge_router_upstream.cfg
index a08c5cce..dea42b95 100644
--- a/examples/ip_pipeline/config/edge_router_upstream.cfg
+++ b/examples/ip_pipeline/config/edge_router_upstream.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -29,6 +29,7 @@
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
; An edge router typically sits between two networks such as the provider
; core network and the provider access network. A typical packet processing
; pipeline for the upstream traffic (i.e. traffic from access to core
@@ -36,10 +37,10 @@
; Flow classification, Metering, Routing and Packet TX. The input packets
; are assumed to be Q-in-Q IPv4, while the output packets are MPLS IPv4
; (with variable number of labels per route).
-
+;
; A simple implementation for this functional pipeline is presented below.
-
-; Packet Rx & Pass-Through Flow-Classification Flow-Actions Routing
+;
+; Packet RX & Pass-Through Flow Classification Flow Actions Routing
: Firewall
; __________ SWQ0 __________ SWQ4 __________ SWQ8 __________ SWQ12 __________
; RXQ0.0 --->| |------>| |------>| |------>| |------>| |------> TXQ0.0
@@ -51,8 +52,21 @@
; RXQ3.0 --->| |------>| |------>| |------>| |------>| |------> TXQ3.0
; |__________| |__________| |__________| |__________| |__________|
; | | |
-; +--> SINK0 (Default) +--> SINK1 (Default) +--> SINK2 (Route Miss)
+; +--> SINK0 (Default) +--> SINK1 (Default) +--> SINK2 (Default)
+;
+; Input packet: Ethernet/QinQ/IPv4
+; Output packet: Ethernet/MPLS/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 QinQ header 270 8
+; 4 IPv4 header 278 20
+[EAL]
+log_level = 0
[PIPELINE0]
type = MASTER
@@ -72,10 +86,10 @@ core = 2
pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
pktq_out = SWQ4 SWQ5 SWQ6 SWQ7
dma_size = 8
-dma_dst_offset = 128; mbuf (128)
-dma_src_offset = 268; mbuf (128) + headroom (128) + 1st ethertype offset (12) = 268
+dma_dst_offset = 128
+dma_src_offset = 268; 1st Ethertype offset
dma_src_mask = 00000FFF00000FFF; qinq
-dma_hash_offset = 136; dma_dst_offset + dma_size = 136
+dma_hash_offset = 136; dma_dst_offset + dma_size
[PIPELINE3]
type = FLOW_CLASSIFICATION
@@ -86,7 +100,7 @@ n_flows = 65536
key_size = 8; dma_size
key_offset = 128; dma_dst_offset
hash_offset = 136; dma_hash_offset
-flowid_offset = 192; mbuf (128) + 64
+flowid_offset = 192
[PIPELINE4]
type = FLOW_ACTIONS
@@ -96,7 +110,7 @@ pktq_out = SWQ12 SWQ13 SWQ14 SWQ15
n_flows = 65536
n_meters_per_flow = 1
flow_id_offset = 192; flowid_offset
-ip_hdr_offset = 278; mbuf (128) + headroom (128) + ethernet (14) + qinq (8) = 278
+ip_hdr_offset = 278
color_offset = 196; flowid_offset + sizeof(flow_id)
[PIPELINE5]
@@ -106,5 +120,5 @@ pktq_in = SWQ12 SWQ13 SWQ14 SWQ15
pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2
encap = ethernet_mpls
mpls_color_mark = yes
-ip_hdr_offset = 278; mbuf (128) + headroom (128) + ethernet (14) + qinq (8) = 278
+ip_hdr_offset = 278
color_offset = 196; flowid_offset + sizeof(flow_id)
diff --git a/examples/ip_pipeline/config/edge_router_upstream.sh b/examples/ip_pipeline/config/edge_router_upstream.sh
index eeba600c..5d574c1a 100644
--- a/examples/ip_pipeline/config/edge_router_upstream.sh
+++ b/examples/ip_pipeline/config/edge_router_upstream.sh
@@ -1,24 +1,26 @@
-################################################
-# Firewall Rules:4 for 4 ports
-################################################
-p 1 firewall add ipv4 1 0.0.0.0 8 0.0.0.0 10 0 0 0 0 6 1 0
-p 1 firewall add ipv4 1 0.0.0.0 8 0.64.0.0 10 0 0 0 0 6 1 1
-p 1 firewall add ipv4 1 0.0.0.0 8 0.128.0.0 10 0 0 0 0 6 1 2
-p 1 firewall add ipv4 1 0.0.0.0 8 0.192.0.0 10 0 0 0 0 6 1 3
-p 1 firewall add default 4 #SINK0
+#
+# run ./config/edge_router_upstream.sh
+#
+################################################################################
+# Firewall
+################################################################################
+p 1 firewall add default 4 #SINK0
+p 1 firewall add bulk ./config/edge_router_upstream_firewall.txt
+#p 1 firewall ls
################################################################################
-# Flow classification
+# Flow Classification
################################################################################
p 3 flow add default 4 #SINK1
-p 3 flow add qinq all 65536 4
+p 3 flow add qinq bulk ./config/edge_router_upstream_flow.txt
+#p 3 flow ls
################################################################################
-# Flow Actions - Metering
+# Flow Actions - Metering and Policing
################################################################################
-p 4 flows 65536 meter 0 trtcm 1250000000 1250000000 100000000 100000000
-p 4 flows 65536 ports 4
+p 4 action flow bulk ./config/edge_router_upstream_action.txt
+#p 4 action flow ls
################################################################################
# Routing: Ether MPLS, ARP off
@@ -28,11 +30,4 @@ p 5 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 0:1
p 5 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 10:11
p 5 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 20:21
p 5 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 30:31
-
-################################################################################
-# List all configurations
-################################################################################
-p 1 firewall ls
-#p 3 flow ls
-#p 4 flow actions ls
-p 5 route ls
+#p 5 route ls
diff --git a/examples/ip_pipeline/config/firewall.cfg b/examples/ip_pipeline/config/firewall.cfg
new file mode 100644
index 00000000..2f5dd9f6
--- /dev/null
+++ b/examples/ip_pipeline/config/firewall.cfg
@@ -0,0 +1,68 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Firewall |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (default rule)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+n_rules = 4096
+pkt_type = ipv4
+;pkt_type = vlan_ipv4
+;pkt_type = qinq_ipv4
diff --git a/examples/ip_pipeline/config/firewall.sh b/examples/ip_pipeline/config/firewall.sh
new file mode 100644
index 00000000..c83857ee
--- /dev/null
+++ b/examples/ip_pipeline/config/firewall.sh
@@ -0,0 +1,13 @@
+#
+# run ./config/firewall.sh
+#
+
+p 1 firewall add default 4 #SINK0
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
+
+#p 1 firewall add bulk ./config/firewall.txt
+
+p 1 firewall ls
diff --git a/examples/ip_pipeline/config/firewall.txt b/examples/ip_pipeline/config/firewall.txt
new file mode 100644
index 00000000..54cfffda
--- /dev/null
+++ b/examples/ip_pipeline/config/firewall.txt
@@ -0,0 +1,9 @@
+#
+# p <pipelineid> firewall add bulk ./config/firewall.txt
+# p <pipelineid> firewall del bulk ./config/firewall.txt
+#
+
+priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
+priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
+priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
+priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
diff --git a/examples/ip_pipeline/config/flow.cfg b/examples/ip_pipeline/config/flow.cfg
new file mode 100644
index 00000000..6895d393
--- /dev/null
+++ b/examples/ip_pipeline/config/flow.cfg
@@ -0,0 +1,72 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; ________________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Flow |
+; RXQ2.0 --->| Classification |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |________________|
+; |
+; +-----------> SINK0 (flow lookup miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 QinQ/IPv4/IPv6 header 270 8/20/40
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FLOW_CLASSIFICATION
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+n_flows = 65536
+;key_size = 8 ; QinQ key size
+;key_offset = 270 ; QinQ key offset
+;key_mask = 0000FFF00000FFF0 ; QinQ key mask
+key_size = 16 ; IPv4 5-tuple key size
+key_offset = 278 ; IPv4 5-tuple key offset
+key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
+flowid_offset = 128
diff --git a/examples/ip_pipeline/config/flow.sh b/examples/ip_pipeline/config/flow.sh
new file mode 100644
index 00000000..489c7079
--- /dev/null
+++ b/examples/ip_pipeline/config/flow.sh
@@ -0,0 +1,25 @@
+#
+# run ./config/flow.sh
+#
+
+################################################################################
+# Flow classification (QinQ)
+################################################################################
+#p 1 flow add default 4 #SINK0
+#p 1 flow add qinq 100 200 port 0 id 0
+#p 1 flow add qinq 101 201 port 1 id 1
+#p 1 flow add qinq 102 202 port 2 id 2
+#p 1 flow add qinq 103 203 port 3 id 3
+
+#p 1 flow add qinq bulk ./config/flow.txt
+
+################################################################################
+# Flow classification (IPv4 5-tuple)
+################################################################################
+p 1 flow add default 4 #SINK0
+p 1 flow add ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
+p 1 flow add ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
+p 1 flow add ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
+p 1 flow add ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
+
+#p 1 flow add ipv4 bulk ./config/flow.txt
diff --git a/examples/ip_pipeline/config/flow.txt b/examples/ip_pipeline/config/flow.txt
new file mode 100644
index 00000000..c1a141dd
--- /dev/null
+++ b/examples/ip_pipeline/config/flow.txt
@@ -0,0 +1,17 @@
+#
+# p <pipelineid> flow add qinq bulk ./config/flow.txt
+#
+
+#qinq 100 200 port 0 id 0
+#qinq 101 201 port 1 id 1
+#qinq 102 202 port 2 id 2
+#qinq 103 203 port 3 id 3
+
+#
+# p <pipelineid> flow add ipv4 bulk ./config/flow.txt
+#
+
+ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
+ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
+ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
+ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
diff --git a/examples/ip_pipeline/config/kni.cfg b/examples/ip_pipeline/config/kni.cfg
new file mode 100644
index 00000000..cea208b4
--- /dev/null
+++ b/examples/ip_pipeline/config/kni.cfg
@@ -0,0 +1,67 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;
+; ______________ ______________________
+; | | KNI0 | |
+; RXQ0.0 --->| |------->|--+ |
+; | | KNI1 | | br0 |
+; TXQ1.0 <---| |<-------|<-+ |
+; | Pass-through | | Linux Kernel |
+; | (P1) | | Network Stack |
+; | | KNI1 | |
+; RXQ1.0 --->| |------->|--+ |
+; | | KNI0 | | br0 |
+; TXQ0.0 <---| |<-------|<-+ |
+; |______________| |______________________|
+;
+; Insert Linux kernel KNI module:
+; [Linux]$ insmod rte_kni.ko
+;
+; Configure Linux kernel bridge between KNI0 and KNI1 interfaces:
+; [Linux]$ ifconfig KNI0 up
+; [Linux]$ ifconfig KNI1 up
+; [Linux]$ brctl addbr "br0"
+; [Linux]$ brctl addif br0 KNI0
+; [Linux]$ brctl addif br0 KNI1
+; [Linux]$ ifconfig br0 up
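+;
+; Alternatively (assuming the iproute2 tools are available), an equivalent
+; bridge setup is:
+; [Linux]$ ip link add br0 type bridge
+; [Linux]$ ip link set KNI0 up
+; [Linux]$ ip link set KNI1 up
+; [Linux]$ ip link set KNI0 master br0
+; [Linux]$ ip link set KNI1 master br0
+; [Linux]$ ip link set br0 up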
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 KNI1 RXQ1.0 KNI0
+pktq_out = KNI0 TXQ1.0 KNI1 TXQ0.0
diff --git a/examples/ip_pipeline/config/l2fwd.cfg b/examples/ip_pipeline/config/l2fwd.cfg
index c743a143..a1df9e6a 100644
--- a/examples/ip_pipeline/config/l2fwd.cfg
+++ b/examples/ip_pipeline/config/l2fwd.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,9 @@
; |________________|
;
+[EAL]
+log_level = 0
+
[PIPELINE0]
type = MASTER
core = 0
diff --git a/examples/ip_pipeline/config/l3fwd.cfg b/examples/ip_pipeline/config/l3fwd.cfg
index 5449dc32..02c8f36f 100644
--- a/examples/ip_pipeline/config/l3fwd.cfg
+++ b/examples/ip_pipeline/config/l3fwd.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,9 @@
; 2 Ethernet header 256 14
; 3 IPv4 header 270 20
+[EAL]
+log_level = 0
+
[PIPELINE0]
type = MASTER
core = 0
@@ -59,5 +62,7 @@ type = ROUTING
core = 1
pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
-encap = ethernet; encap = ethernet / ethernet_qinq / ethernet_mpls
+encap = ethernet
+;encap = ethernet_qinq
+;encap = ethernet_mpls
ip_hdr_offset = 270
diff --git a/examples/ip_pipeline/config/l3fwd.sh b/examples/ip_pipeline/config/l3fwd.sh
index 27740103..47406aa4 100644
--- a/examples/ip_pipeline/config/l3fwd.sh
+++ b/examples/ip_pipeline/config/l3fwd.sh
@@ -1,9 +1,33 @@
+#
+# run ./config/l3fwd.sh
+#
+
################################################################################
# Routing: encap = ethernet, arp = off
################################################################################
p 1 route add default 4 #SINK0
-p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
-p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1
-p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2
-p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3
+p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
+p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1
+p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2
+p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3
p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_qinq, arp = off
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 1000 2000
+#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 1001 2001
+#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 1002 2002
+#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 1003 2003
+#p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_mpls, arp = off
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 1000:2000
+#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 1001:2001
+#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 1002:2002
+#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 1003:2003
+#p 1 route ls
diff --git a/examples/ip_pipeline/config/l3fwd_arp.cfg b/examples/ip_pipeline/config/l3fwd_arp.cfg
new file mode 100644
index 00000000..2c63c8fd
--- /dev/null
+++ b/examples/ip_pipeline/config/l3fwd_arp.cfg
@@ -0,0 +1,70 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Routing |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (route miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+encap = ethernet
+;encap = ethernet_qinq
+;encap = ethernet_mpls
+n_arp_entries = 1024
+ip_hdr_offset = 270
+arp_key_offset = 128
diff --git a/examples/ip_pipeline/config/l3fwd_arp.sh b/examples/ip_pipeline/config/l3fwd_arp.sh
new file mode 100644
index 00000000..20bea582
--- /dev/null
+++ b/examples/ip_pipeline/config/l3fwd_arp.sh
@@ -0,0 +1,43 @@
+#
+# run ./config/l3fwd_arp.sh
+#
+
+################################################################################
+# ARP
+################################################################################
+p 1 arp add default 4 #SINK0
+p 1 arp add 0 10.0.0.1 a0:b0:c0:d0:e0:f0
+p 1 arp add 1 11.0.0.1 a1:b1:c1:d1:e1:f1
+p 1 arp add 2 12.0.0.1 a2:b2:c2:d2:e2:f2
+p 1 arp add 3 13.0.0.1 a3:b3:c3:d3:e3:f3
+p 1 arp ls
+
+################################################################################
+# Routing: encap = ethernet, arp = on
+################################################################################
+p 1 route add default 4 #SINK0
+p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1
+p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1
+p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1
+p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1
+p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_qinq, arp = on
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 qinq 1000 2000
+#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 qinq 1001 2001
+#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 qinq 1002 2002
+#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 qinq 1003 2003
+#p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_mpls, arp = on
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 mpls 1000:2000
+#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 mpls 1001:2001
+#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 mpls 1002:2002
+#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 mpls 1003:2003
+#p 1 route ls
diff --git a/examples/ip_pipeline/config/network_layers.cfg b/examples/ip_pipeline/config/network_layers.cfg
new file mode 100644
index 00000000..8054d9fe
--- /dev/null
+++ b/examples/ip_pipeline/config/network_layers.cfg
@@ -0,0 +1,223 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; The diagram below shows how additional protocol components can be plugged into
+; the IP layer implemented by the ip_pipeline application. Pick your favorite
+; open source components for dynamic ARP, ICMP, UDP or TCP termination, etc. and
+; connect them through SWQs to the IP infrastructure.
+;
+; The input packets with local destination are sent to the UDP/TCP applications
+; while the input packets with remote destination are routed back to the
+; network. Additional features can easily be added to this setup:
+; * IP Reassembly: add SWQs with IP reassembly enabled (typically required for
+; the input traffic with local destination);
+; * IP Fragmentation: add SWQs with IP fragmentation enabled (typically
+; required to enforce the MTU for the routed output traffic);
+; * Traffic Metering: add Flow Action pipeline instances (e.g. for metering the
+; TCP connections or ICMP input traffic);
+; * Traffic Management: add TMs for the required output LINKs;
+; * Protocol encapsulations (QinQ, MPLS) for the output packets: part of the
+; routing pipeline configuration.
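+;
+; As an illustration of the SWQ-based items above, fragmentation/reassembly
+; is enabled per SWQ section. The sketch below is only a hypothetical
+; example (section index, entry names and values are assumptions, not part
+; of this configuration):
+;
+;   [SWQ16]
+;   ipv4_frag = yes
+;   mtu = 1500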
+;
+; _________ _________
+; | | | |
+; | UDP | | TCP |
+; | App | | App |
+; |_________| |_________|
+; ^ | ^ |
+; __|___V__ __|___V__
+; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
+; | UDP |-------+ | TCP |------------+
+; | | | | | |
+; |_________| | |_________| |
+; ^ | ^ |
+; | SWQ2 | | SWQ3 |
+; | (UDP RX) | | (TCP RX) |
+; ____|____ | ____|____ |
+; | | | | | |
+; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
+; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
+; |_________| (Deny)| | |_________| (RST) |
+; RXQ<0..3>.2 -------------------------|-----+ |
+; (TCP local dest) | |
+; | +------------------------------+
+; | |
+; _V_____V_
+; | |
+; | Routing | TXQ<0..3>.0
+; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
+; (IP remote dest) | (P1) |
+; |_________|
+; | ^ |
+; SWQ4 +-------------+ | | SWQ5 (ARP miss)
+; (Route miss) | | +------------+
+; | +-------------+ |
+; ___V__|__ SWQ6 ____V____
+; | | (ICMP TX) | | TXQ<0..3>.1
+; RXQ<0..3>.3 ------>| ICMP | +------>| Dyn ARP +------------->
+; (IP local dest) | | | | |
+; |_________| | |_________|
+; RXQ<0..3>.4 -------------------------------+
+; (ARP)
+;
+; This configuration file implements the diagram presented below, where the
+; dynamic ARP, ICMP, UDP and TCP components have been stubbed out and replaced
+; with loop-back and packet drop devices.
+;
+; _________ _________
+; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
+;            |Loopback |-------+          |Loopback |------------+
+; | (P4) | | | (P5) | |
+; |_________| | |_________| |
+; ^ | ^ |
+; | SWQ2 | | SWQ3 |
+; | (UDP RX) | | (TCP RX) |
+; ____|____ | ____|____ |
+; | | | | | |
+; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
+; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
+; |_________| (Deny)| | |_________| (RST) |
+; RXQ<0..3>.2 -------------------------|-----+ |
+; (TCP local dest) | |
+; | +------------------------------+
+; | |
+; _V_____V_
+; | |
+; | Routing | TXQ<0..3>.0
+; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
+; (IP remote dest) | (P1) |
+; |_________|
+; | |
+; SINK2 |<---+ +--->| SINK3
+; (Route miss) (ARP miss)
+;
+; _________ _________
+; | | | |
+; RXQ<0..3>.3 ------>| Drop +--->| SINK<4..7> +------>| Drop +--->| SINK<8..11>
+; (IP local dest) | (P6) | (IP local dest) | | (P7) | (ARP)
+; |_________| | |_________|
+; RXQ<0..3>.4 ------------------------------------+
+; (ARP)
+;
+;
+; Input packet: Ethernet/IPv4 or Ethernet/ARP
+; Output packet: Ethernet/IPv4 or Ethernet/ARP
+;
+; Packet buffer layout (for input IPv4 packets):
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+; 4 ICMP/UDP/TCP header 290 8/8/20
+
+[EAL]
+log_level = 0
+
+[LINK0]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK1]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK2]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK3]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0 SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2 SINK3
+port_local_dest = 4 ; SINK2 (Drop)
+n_arp_entries = 1000
+ip_hdr_offset = 270
+arp_key_offset = 128
+
+[PIPELINE2]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.1 RXQ1.1 RXQ2.1 RXQ3.1
+pktq_out = SWQ2 SINK0
+n_rules = 4096
+
+[PIPELINE3]
+type = FLOW_CLASSIFICATION
+core = 1
+pktq_in = RXQ0.2 RXQ1.2 RXQ2.2 RXQ3.2
+pktq_out = SWQ3 SINK1
+n_flows = 65536
+key_size = 16 ; IPv4 5-tuple key size
+key_offset = 278 ; IPv4 5-tuple key offset
+key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
+flowid_offset = 128 ; Flow ID effectively acts as TCP socket ID
+
+[PIPELINE4]
+type = PASS-THROUGH ; Loop-back (UDP place-holder)
+core = 1
+pktq_in = SWQ2
+pktq_out = SWQ0
+
+[PIPELINE5]
+type = PASS-THROUGH ; Loop-back (TCP place-holder)
+core = 1
+pktq_in = SWQ3
+pktq_out = SWQ1
+
+[PIPELINE6]
+type = PASS-THROUGH ; Drop (ICMP place-holder)
+core = 1
+pktq_in = RXQ0.3 RXQ1.3 RXQ2.3 RXQ3.3
+pktq_out = SINK4 SINK5 SINK6 SINK7
+
+[PIPELINE7]
+type = PASS-THROUGH ; Drop (Dynamic ARP place-holder)
+core = 1
+pktq_in = RXQ0.4 RXQ1.4 RXQ2.4 RXQ3.4
+pktq_out = SINK8 SINK9 SINK10 SINK11
diff --git a/examples/ip_pipeline/config/network_layers.sh b/examples/ip_pipeline/config/network_layers.sh
new file mode 100644
index 00000000..3b86bebd
--- /dev/null
+++ b/examples/ip_pipeline/config/network_layers.sh
@@ -0,0 +1,79 @@
+#
+# run ./config/network_layers.sh
+#
+
+################################################################################
+# Link configuration
+################################################################################
+# Routes added implicitly when links are brought UP:
+# IP Prefix = 10.0.0.1/16 => (Port 0, Local)
+# IP Prefix = 10.0.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.1.0.1/16 => (Port 1, Local)
+# IP Prefix = 10.1.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.2.0.1/16 => (Port 2, Local)
+# IP Prefix = 10.2.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.3.0.1/16 => (Port 3, Local)
+# IP Prefix = 10.3.0.1/32 => (Port 4, Local)
+link 0 down
+link 1 down
+link 2 down
+link 3 down
+link 0 config 10.0.0.1 16
+link 1 config 10.1.0.1 16
+link 2 config 10.2.0.1 16
+link 3 config 10.3.0.1 16
+link 0 up
+link 1 up
+link 2 up
+link 3 up
+#link ls
+
+################################################################################
+# Static ARP
+################################################################################
+p 1 arp add default 5 #SINK3
+p 1 arp add 0 10.0.0.2 a0:b0:c0:d0:e0:f0
+p 1 arp add 1 10.1.0.2 a1:b1:c1:d1:e1:f1
+p 1 arp add 2 10.2.0.2 a2:b2:c2:d2:e2:f2
+p 1 arp add 3 10.3.0.2 a3:b3:c3:d3:e3:f3
+#p 1 arp ls
+
+################################################################################
+# Routes
+################################################################################
+p 1 route add default 4 #SINK2
+p 1 route add 100.0.0.0 16 port 0 ether 10.0.0.2
+p 1 route add 100.1.0.0 16 port 1 ether 10.1.0.2
+p 1 route add 100.2.0.0 16 port 2 ether 10.2.0.2
+p 1 route add 100.3.0.0 16 port 3 ether 10.3.0.2
+#p 1 route ls
+
+################################################################################
+# Local destination UDP traffic
+################################################################################
+# Prio = Lowest: [SA = ANY, DA = ANY, SP = ANY, DP = ANY, PROTO = ANY] => Drop
+# Prio = 1 (High): [SA = ANY, DA = 10.0.0.1, SP = ANY, DP = 1000, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.1.0.1, SP = ANY, DP = 1001, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.2.0.1, SP = ANY, DP = 1002, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.3.0.1, SP = ANY, DP = 1003, PROTO = UDP] => Allow
+p 2 firewall add default 1 #SINK0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.0.0.1 32 0 65535 1000 1000 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.1.0.1 32 0 65535 1001 1001 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.2.0.1 32 0 65535 1002 1002 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.3.0.1 32 0 65535 1003 1003 17 0xF port 0
+#p 2 firewall ls
+
+################################################################################
+# Local destination TCP traffic
+################################################################################
+# Unknown connection => Drop
+# TCP [SA = 100.0.0.10, DA = 10.0.0.1, SP = 1000, DP = 80] => socket ID = 0
+# TCP [SA = 100.1.0.10, DA = 10.1.0.1, SP = 1001, DP = 80] => socket ID = 1
+# TCP [SA = 100.2.0.10, DA = 10.2.0.1, SP = 1002, DP = 80] => socket ID = 2
+# TCP [SA = 100.3.0.10, DA = 10.3.0.1, SP = 1003, DP = 80] => socket ID = 3
+p 3 flow add default 1 #SINK1
+p 3 flow add ipv4 100.0.0.10 10.0.0.1 1000 80 6 port 1 id 0
+p 3 flow add ipv4 100.1.0.10 10.1.0.1 1001 80 6 port 1 id 1
+p 3 flow add ipv4 100.2.0.10 10.2.0.1 1002 80 6 port 1 id 2
+p 3 flow add ipv4 100.3.0.10 10.3.0.1 1003 80 6 port 1 id 3
+#p 3 flow ls
diff --git a/examples/ip_pipeline/config/pipeline-to-core-mapping.py b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
new file mode 100755
index 00000000..37b131c6
--- /dev/null
+++ b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
@@ -0,0 +1,936 @@
+#! /usr/bin/python2
+
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# This script maps the set of pipelines identified (MASTER pipelines are
+# ignored) from the input configuration file to the set of cores
+# provided as input argument and creates configuration files for each of
+# the mapping combinations.
+#
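+# Example invocation (the file name and core list are only illustrative):
+#   ./pipeline-to-core-mapping.py -i <config>.cfg -pc "s0c1,s0c2,s0c3"
+#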
+
+from __future__ import print_function
+import sys
+import errno
+import os
+import re
+import array
+import itertools
+import argparse
+from collections import namedtuple
+
+# default values
+enable_stage0_traceout = 1
+enable_stage1_traceout = 1
+enable_stage2_traceout = 1
+
+enable_stage1_fileout = 1
+enable_stage2_fileout = 1
+
+Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
+constants = Constants(16, 64)
+
+# pattern for physical core
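+# matches e.g. "s0c1" or "S1C12" (single socket digit, core id starting at 1)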
+pattern_phycore = '^(s|S)\d(c|C)[1-9][0-9]*$'
+reg_phycore = re.compile(pattern_phycore)
+
+
+def popcount(mask):
+ return bin(mask).count("1")
+
+
+def len2mask(length):
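+    # return a mask with the lowest 'length' bits set: len2mask(3) == 0b111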
+ if (length == 0):
+ return 0
+
+ if (length > 64):
+ sys.exit('error: len2mask - length %i > 64. exiting' % length)
+
+ return int('1' * length, 2)
+
+
+def bitstring_write(n, n_bits):
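+    # print and return the n_bits-wide binary string of n (MSB first)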
+ tmpstr = ""
+ if (n_bits > 64):
+ return
+
+ i = n_bits - 1
+ while (i >= 0):
+ cond = (n & (1 << i))
+ if (cond):
+ print('1', end='')
+ tmpstr += '1'
+ else:
+ print('0', end='')
+ tmpstr += '0'
+ i -= 1
+ return tmpstr
+
+
+class Cores0:
+
+ def __init__(self):
+ self.n_pipelines = 0
+
+
+class Cores1:
+
+ def __init__(self):
+ self.pipelines = 0
+ self.n_pipelines = 0
+
+
+class Cores2:
+
+ def __init__(self):
+ self.pipelines = 0
+ self.n_pipelines = 0
+ self.counter = 0
+ self.counter_max = 0
+ self.bitpos = array.array(
+ "L", itertools.repeat(0, constants.MAX_PIPELINES))
+
+
+class Context0:
+
+ def __init__(self):
+ self.cores = [Cores0() for i in range(0, constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.n_pipelines0 = 0
+ self.pos = 0
+ self.file_comment = ""
+ self.ctx1 = None
+ self.ctx2 = None
+
+ def stage0_print(self):
+ print('printing Context0 obj')
+ print('c0.cores(n_pipelines) = [ ', end='')
+ for cores_count in range(0, constants.MAX_CORES):
+ print(self.cores[cores_count].n_pipelines, end=' ')
+ print(']')
+ print('c0.n_cores = %d' % self.n_cores)
+ print('c0.n_pipelines = %d' % self.n_pipelines)
+ print('c0.n_pipelines0 = %d' % self.n_pipelines0)
+ print('c0.pos = %d' % self.pos)
+ print('c0.file_comment = %s' % self.file_comment)
+ if (self.ctx1 is not None):
+ print('c0.ctx1 = ', end='')
+ print(repr(self.ctx1))
+ else:
+ print('c0.ctx1 = None')
+
+ if (self.ctx2 is not None):
+ print('c0.ctx2 = ', end='')
+ print(repr(self.ctx2))
+ else:
+ print('c0.ctx2 = None')
+
+ def stage0_init(self, num_cores, num_pipelines, ctx1, ctx2):
+ self.n_cores = num_cores
+ self.n_pipelines = num_pipelines
+ self.ctx1 = ctx1
+ self.ctx2 = ctx2
+
+ def stage0_process(self):
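+        # backtracking enumeration of every split of n_pipelines across up
+        # to n_cores cores, with per-core counts kept in non-increasing
+        # order; each complete split is handed over to stage1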
+ # stage0 init
+ self.cores[0].n_pipelines = self.n_pipelines
+ self.n_pipelines0 = 0
+ self.pos = 1
+
+ while True:
+ # go forward
+ while True:
+ if ((self.pos < self.n_cores) and (self.n_pipelines0 > 0)):
+ self.cores[self.pos].n_pipelines = min(
+ self.cores[self.pos - 1].n_pipelines,
+ self.n_pipelines0)
+ self.n_pipelines0 -= self.cores[self.pos].n_pipelines
+ self.pos += 1
+ else:
+ break
+
+ # check solution
+ if (self.n_pipelines0 == 0):
+ self.stage0_log()
+ self.ctx1.stage1_init(self, self.ctx2) # self is object c0
+ self.ctx1.stage1_process()
+
+ # go backward
+ while True:
+ if (self.pos == 0):
+ return
+
+ self.pos -= 1
+ if ((self.cores[self.pos].n_pipelines > 1) and
+ (self.pos != (self.n_cores - 1))):
+ break
+
+ self.n_pipelines0 += self.cores[self.pos].n_pipelines
+ self.cores[self.pos].n_pipelines = 0
+
+ # rearm
+ self.cores[self.pos].n_pipelines -= 1
+ self.n_pipelines0 += 1
+ self.pos += 1
+
+ def stage0_log(self):
+ tmp_file_comment = ""
+ if(enable_stage0_traceout != 1):
+ return
+
+ print('STAGE0: ', end='')
+ tmp_file_comment += 'STAGE0: '
+ for cores_count in range(0, self.n_cores):
+ print('C%d = %d\t'
+ % (cores_count,
+ self.cores[cores_count].n_pipelines), end='')
+ tmp_file_comment += "C{} = {}\t".format(
+ cores_count, self.cores[cores_count].n_pipelines)
+ # end for
+ print('')
+ self.ctx1.stage0_file_comment = tmp_file_comment
+ self.ctx2.stage0_file_comment = tmp_file_comment
+
+
+class Context1:
+ _fileTrace = None
+
+ def __init__(self):
+ self.cores = [Cores1() for i in range(constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.stage0_file_comment = ""
+ self.stage1_file_comment = ""
+
+ self.ctx2 = None
+ self.arr_pipelines2cores = []
+
+ def stage1_reset(self):
+ for i in range(constants.MAX_CORES):
+ self.cores[i].pipelines = 0
+ self.cores[i].n_pipelines = 0
+
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.ctx2 = None
+ # clear list
+ del self.arr_pipelines2cores[:]
+
+ def stage1_print(self):
+ print('printing Context1 obj')
+ print('ctx1.cores(pipelines,n_pipelines) = [ ', end='')
+ for cores_count in range(0, constants.MAX_CORES):
+ print('(%d,%d)' % (self.cores[cores_count].pipelines,
+ self.cores[cores_count].n_pipelines), end=' ')
+ print(']')
+ print('ctx1.n_cores = %d' % self.n_cores)
+ print('ctx1.n_pipelines = %d' % self.n_pipelines)
+ print('ctx1.pos = %d' % self.pos)
+ print('ctx1.stage0_file_comment = %s' % self.stage0_file_comment)
+ print('ctx1.stage1_file_comment = %s' % self.stage1_file_comment)
+ if (self.ctx2 is not None):
+ print('ctx1.ctx2 = ', end='')
+ print(self.ctx2)
+ else:
+ print('ctx1.ctx2 = None')
+
+ def stage1_init(self, c0, ctx2):
+ self.stage1_reset()
+ self.n_cores = 0
+ while (c0.cores[self.n_cores].n_pipelines > 0):
+ self.n_cores += 1
+
+ self.n_pipelines = c0.n_pipelines
+ self.ctx2 = ctx2
+
+ self.arr_pipelines2cores = [0] * self.n_pipelines
+
+ i = 0
+ while (i < self.n_cores):
+ self.cores[i].n_pipelines = c0.cores[i].n_pipelines
+ i += 1
+
+ def stage1_process(self):
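+        # assign concrete pipeline sets (bitmasks) to the cores, honoring
+        # the per-core counts from stage0 and skipping overlapping or
+        # duplicate assignments; complete assignments are handed to stage2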
+ pipelines_max = len2mask(self.n_pipelines)
+ while True:
+ pos = 0
+ overlap = 0
+
+ if (self.cores[self.pos].pipelines == pipelines_max):
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].pipelines = 0
+ self.pos -= 1
+ continue
+
+ self.cores[self.pos].pipelines += 1
+ if (popcount(self.cores[self.pos].pipelines) !=
+ self.cores[self.pos].n_pipelines):
+ continue
+
+ overlap = 0
+ pos = 0
+ while (pos < self.pos):
+ if ((self.cores[self.pos].pipelines) &
+ (self.cores[pos].pipelines)):
+ overlap = 1
+ break
+ pos += 1
+
+ if (overlap):
+ continue
+
+ if ((self.pos > 0) and
+ ((self.cores[self.pos].n_pipelines) ==
+ (self.cores[self.pos - 1].n_pipelines)) and
+ ((self.cores[self.pos].pipelines) <
+ (self.cores[self.pos - 1].pipelines))):
+ continue
+
+ if (self.pos == self.n_cores - 1):
+ self.stage1_log()
+ self.ctx2.stage2_init(self)
+ self.ctx2.stage2_process()
+
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].pipelines = 0
+ self.pos -= 1
+ continue
+
+ self.pos += 1
+
+ def stage1_log(self):
+ tmp_file_comment = ""
+ if(enable_stage1_traceout == 1):
+ print('STAGE1: ', end='')
+ tmp_file_comment += 'STAGE1: '
+ i = 0
+ while (i < self.n_cores):
+ print('C%d = [' % i, end='')
+ tmp_file_comment += "C{} = [".format(i)
+
+ j = self.n_pipelines - 1
+ while (j >= 0):
+ cond = ((self.cores[i].pipelines) & (1 << j))
+ if (cond):
+ print('1', end='')
+ tmp_file_comment += '1'
+ else:
+ print('0', end='')
+ tmp_file_comment += '0'
+ j -= 1
+
+ print(']\t', end='')
+ tmp_file_comment += ']\t'
+ i += 1
+
+ print('\n', end='')
+ self.stage1_file_comment = tmp_file_comment
+ self.ctx2.stage1_file_comment = tmp_file_comment
+
+        # check if file tracing is enabled
+ if(enable_stage1_fileout != 1):
+ return
+
+ # spit out the combination to file
+ self.stage1_process_file()
+
+ def stage1_updateCoresInBuf(self, nPipeline, sCore):
+ rePipeline = self._fileTrace.arr_pipelines[nPipeline]
+ rePipeline = rePipeline.replace("[", "\[").replace("]", "\]")
+ reCore = 'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
+ sSubs = 'core = ' + sCore + '\n'
+
+ reg_pipeline = re.compile(rePipeline)
+ search_match = reg_pipeline.search(self._fileTrace.in_buf)
+
+ if(search_match):
+ pos = search_match.start()
+ substr1 = self._fileTrace.in_buf[:pos]
+ substr2 = self._fileTrace.in_buf[pos:]
+ substr2 = re.sub(reCore, sSubs, substr2, 1)
+ self._fileTrace.in_buf = substr1 + substr2
+
+ def stage1_process_file(self):
+ outFileName = os.path.join(self._fileTrace.out_path,
+ self._fileTrace.prefix_outfile)
+ outFileName += "_{}CoReS".format(self.n_cores)
+
+ i = 0 # represents core number
+ while (i < self.n_cores):
+ j = self.n_pipelines - 1
+ pipeline_idx = 0
+ while(j >= 0):
+ cond = ((self.cores[i].pipelines) & (1 << j))
+ if (cond):
+ # update the pipelines array to match the core
+ # only in case of cond match
+ self.arr_pipelines2cores[
+ pipeline_idx] = fileTrace.in_physical_cores[i]
+
+ j -= 1
+ pipeline_idx += 1
+
+ i += 1
+
+ # update the in_buf as per the arr_pipelines2cores
+ for pipeline_idx in range(len(self.arr_pipelines2cores)):
+ outFileName += "_{}".format(self.arr_pipelines2cores[pipeline_idx])
+ self.stage1_updateCoresInBuf(
+ pipeline_idx, self.arr_pipelines2cores[pipeline_idx])
+
+ # by now the in_buf is all set to be written to file
+ outFileName += self._fileTrace.suffix_outfile
+ outputFile = open(outFileName, "w")
+
+ # write out the comments
+ strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
+ outputFile.write(
+ "; =============== Pipeline-to-Core Mapping ================\n"
+ "; Generated from file {}\n"
+ "; Input pipelines = {}\n"
+ "; Input cores = {}\n"
+ "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
+ .format(
+ self._fileTrace.in_file_namepath,
+ fileTrace.arr_pipelines,
+ fileTrace.in_physical_cores,
+ self._fileTrace.n_pipelines,
+ self._fileTrace.n_cores,
+ strTruncated,
+ self._fileTrace.hyper_thread))
+
+ outputFile.write(
+ "; {stg0cmt}\n"
+ "; {stg1cmt}\n"
+ "; ========================================================\n"
+ "; \n"
+ .format(
+ stg0cmt=self.stage0_file_comment,
+ stg1cmt=self.stage1_file_comment))
+
+ # write buffer contents
+ outputFile.write(self._fileTrace.in_buf)
+ outputFile.flush()
+ outputFile.close()
+
+
+class Context2:
+ _fileTrace = None
+
+ def __init__(self):
+ self.cores = [Cores2() for i in range(constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.stage0_file_comment = ""
+ self.stage1_file_comment = ""
+ self.stage2_file_comment = ""
+
+        # each array entry maps a pipeline to a core, stored as a string;
+        # pipelines range from 1 to n but are kept in a zero-based array
+ self.arr2_pipelines2cores = []
+
+ def stage2_print(self):
+ print('printing Context2 obj')
+ print('ctx2.cores(pipelines, n_pipelines, counter, counter_max) =')
+ for cores_count in range(0, constants.MAX_CORES):
+ print('core[%d] = (%d,%d,%d,%d)' % (
+ cores_count,
+ self.cores[cores_count].pipelines,
+ self.cores[cores_count].n_pipelines,
+ self.cores[cores_count].counter,
+ self.cores[cores_count].counter_max))
+
+ print('ctx2.n_cores = %d' % self.n_cores, end='')
+ print('ctx2.n_pipelines = %d' % self.n_pipelines, end='')
+ print('ctx2.pos = %d' % self.pos)
+ print('ctx2.stage0_file_comment = %s' %
+              self.stage0_file_comment)
+        print('ctx2.stage1_file_comment = %s' %
+              self.stage1_file_comment)
+        print('ctx2.stage2_file_comment = %s' %
+              self.stage2_file_comment)
+
+ def stage2_reset(self):
+ for i in range(0, constants.MAX_CORES):
+ self.cores[i].pipelines = 0
+ self.cores[i].n_pipelines = 0
+ self.cores[i].counter = 0
+ self.cores[i].counter_max = 0
+
+ for idx in range(0, constants.MAX_PIPELINES):
+ self.cores[i].bitpos[idx] = 0
+
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ # clear list
+ del self.arr2_pipelines2cores[:]
+
+ def bitpos_load(self, coreidx):
+ i = j = 0
+ while (i < self.n_pipelines):
+ if ((self.cores[coreidx].pipelines) &
+ (1 << i)):
+ self.cores[coreidx].bitpos[j] = i
+ j += 1
+ i += 1
+ self.cores[coreidx].n_pipelines = j
+
+ def bitpos_apply(self, in_buf, pos, n_pos):
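+        # scatter the low n_pos bits of in_buf to the bit positions in pos[]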
+ out = 0
+ for i in range(0, n_pos):
+ out |= (in_buf & (1 << i)) << (pos[i] - i)
+
+ return out
+
+ def stage2_init(self, ctx1):
+ self.stage2_reset()
+ self.n_cores = ctx1.n_cores
+ self.n_pipelines = ctx1.n_pipelines
+
+ self.arr2_pipelines2cores = [''] * self.n_pipelines
+
+ core_idx = 0
+ while (core_idx < self.n_cores):
+ self.cores[core_idx].pipelines = ctx1.cores[core_idx].pipelines
+
+ self.bitpos_load(core_idx)
+ core_idx += 1
+
+ def stage2_log(self):
+ tmp_file_comment = ""
+ if(enable_stage2_traceout == 1):
+ print('STAGE2: ', end='')
+ tmp_file_comment += 'STAGE2: '
+
+ for i in range(0, self.n_cores):
+ mask = len2mask(self.cores[i].n_pipelines)
+ pipelines_ht0 = self.bitpos_apply(
+ (~self.cores[i].counter) & mask,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ pipelines_ht1 = self.bitpos_apply(
+ self.cores[i].counter,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ print('C%dHT0 = [' % i, end='')
+ tmp_file_comment += "C{}HT0 = [".format(i)
+ tmp_file_comment += bitstring_write(
+ pipelines_ht0, self.n_pipelines)
+
+ print(']\tC%dHT1 = [' % i, end='')
+ tmp_file_comment += "]\tC{}HT1 = [".format(i)
+ tmp_file_comment += bitstring_write(
+ pipelines_ht1, self.n_pipelines)
+ print(']\t', end='')
+ tmp_file_comment += ']\t'
+
+ print('')
+ self.stage2_file_comment = tmp_file_comment
+
+            # check if file tracing is enabled
+ if(enable_stage2_fileout != 1):
+ return
+ # spit out the combination to file
+ self.stage2_process_file()
+
+ def stage2_updateCoresInBuf(self, nPipeline, sCore):
+ rePipeline = self._fileTrace.arr_pipelines[nPipeline]
+ rePipeline = rePipeline.replace("[", "\[").replace("]", "\]")
+ reCore = 'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
+ sSubs = 'core = ' + sCore + '\n'
+
+ reg_pipeline = re.compile(rePipeline)
+ search_match = reg_pipeline.search(self._fileTrace.in_buf)
+
+ if(search_match):
+ pos = search_match.start()
+ substr1 = self._fileTrace.in_buf[:pos]
+ substr2 = self._fileTrace.in_buf[pos:]
+ substr2 = re.sub(reCore, sSubs, substr2, 1)
+ self._fileTrace.in_buf = substr1 + substr2
+
+ def pipelines2cores(self, n, n_bits, nCore, bHT):
+ if (n_bits > 64):
+ return
+
+ i = n_bits - 1
+ pipeline_idx = 0
+ while (i >= 0):
+ cond = (n & (1 << i))
+ if (cond):
+ # update the pipelines array to match the core
+ # only in case of cond match
+ # PIPELINE0 and core 0 are reserved
+ if(bHT):
+ tmpCore = fileTrace.in_physical_cores[nCore] + 'h'
+ self.arr2_pipelines2cores[pipeline_idx] = tmpCore
+ else:
+ self.arr2_pipelines2cores[pipeline_idx] = \
+ fileTrace.in_physical_cores[nCore]
+
+ i -= 1
+ pipeline_idx += 1
+
+ def stage2_process_file(self):
+ outFileName = os.path.join(self._fileTrace.out_path,
+ self._fileTrace.prefix_outfile)
+ outFileName += "_{}CoReS".format(self.n_cores)
+
+ for i in range(0, self.n_cores):
+ mask = len2mask(self.cores[i].n_pipelines)
+ pipelines_ht0 = self.bitpos_apply((~self.cores[i].counter) & mask,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ pipelines_ht1 = self.bitpos_apply(self.cores[i].counter,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ # update pipelines to core mapping
+ self.pipelines2cores(pipelines_ht0, self.n_pipelines, i, False)
+ self.pipelines2cores(pipelines_ht1, self.n_pipelines, i, True)
+
+ # update the in_buf as per the arr_pipelines2cores
+ for pipeline_idx in range(len(self.arr2_pipelines2cores)):
+ outFileName += "_{}".format(
+ self.arr2_pipelines2cores[pipeline_idx])
+ self.stage2_updateCoresInBuf(
+ pipeline_idx, self.arr2_pipelines2cores[pipeline_idx])
+
+ # by now the in_buf is all set to be written to file
+ outFileName += self._fileTrace.suffix_outfile
+ outputFile = open(outFileName, "w")
+
+ # write the file comments
+ strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
+ outputFile.write(
+ "; =============== Pipeline-to-Core Mapping ================\n"
+ "; Generated from file {}\n"
+ "; Input pipelines = {}\n"
+ "; Input cores = {}\n"
+ "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {} \n"
+ .format(
+ self._fileTrace.in_file_namepath,
+ fileTrace.arr_pipelines,
+ fileTrace.in_physical_cores,
+ self._fileTrace.n_pipelines,
+ self._fileTrace.n_cores,
+ strTruncated,
+ self._fileTrace.hyper_thread))
+
+ outputFile.write(
+ "; {stg0cmt}\n"
+ "; {stg1cmt}\n"
+ "; {stg2cmt}\n"
+ "; ========================================================\n"
+ "; \n"
+ .format(
+ stg0cmt=self.stage0_file_comment,
+ stg1cmt=self.stage1_file_comment,
+ stg2cmt=self.stage2_file_comment))
+
+ # write the buffer contents
+ outputFile.write(self._fileTrace.in_buf)
+ outputFile.flush()
+ outputFile.close()
+
+ def stage2_process(self):
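+        # for each core, enumerate the splits of its pipelines between the
+        # two hyper-threads (HT0/HT1) via per-core counters; stage2_log
+        # prints and optionally writes out every combination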
+ i = 0
+ while(i < self.n_cores):
+ self.cores[i].counter_max = len2mask(
+ self.cores[i].n_pipelines - 1)
+ i += 1
+
+ self.pos = self.n_cores - 1
+ while True:
+ if (self.pos == self.n_cores - 1):
+ self.stage2_log()
+
+ if (self.cores[self.pos].counter ==
+ self.cores[self.pos].counter_max):
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].counter = 0
+ self.pos -= 1
+ continue
+
+ self.cores[self.pos].counter += 1
+ if(self.pos < self.n_cores - 1):
+ self.pos += 1
+
+
+class FileTrace:
+
+ def __init__(self, filenamepath):
+ self.in_file_namepath = os.path.abspath(filenamepath)
+ self.in_filename = os.path.basename(self.in_file_namepath)
+ self.in_path = os.path.dirname(self.in_file_namepath)
+
+ filenamesplit = self.in_filename.split('.')
+ self.prefix_outfile = filenamesplit[0]
+ self.suffix_outfile = ".cfg"
+
+        # output folder: a new folder named after the input file,
+        # created in the script's own directory
+ self.out_path = os.path.join(
+ os.path.abspath(os.path.dirname(__file__)),
+ self.prefix_outfile)
+
+ try:
+ os.makedirs(self.out_path)
+ except OSError as excep:
+ if excep.errno == errno.EEXIST and os.path.isdir(self.out_path):
+ pass
+ else:
+ raise
+
+ self.in_buf = None
+        self.arr_pipelines = []  # holds the pipeline section names found
+
+ self.max_cores = 15
+ self.max_pipelines = 15
+
+ self.in_physical_cores = None
+ self.hyper_thread = None
+
+ # save the num of pipelines determined from input file
+ self.n_pipelines = 0
+ # save the num of cores input (or the truncated value)
+ self.n_cores = 0
+ self.ncores_truncated = False
+
+ def print_TraceFile(self):
+ print("self.in_file_namepath = ", self.in_file_namepath)
+ print("self.in_filename = ", self.in_filename)
+ print("self.in_path = ", self.in_path)
+ print("self.out_path = ", self.out_path)
+ print("self.prefix_outfile = ", self.prefix_outfile)
+ print("self.suffix_outfile = ", self.suffix_outfile)
+ print("self.in_buf = ", self.in_buf)
+ print("self.arr_pipelines =", self.arr_pipelines)
+ print("self.in_physical_cores", self.in_physical_cores)
+ print("self.hyper_thread", self.hyper_thread)
+
+
+def process(n_cores, n_pipelines, fileTrace):
+ '''process and map pipelines, cores.'''
+ if (n_cores == 0):
+ sys.exit('N_CORES is 0, exiting')
+
+ if (n_pipelines == 0):
+ sys.exit('N_PIPELINES is 0, exiting')
+
+ if (n_cores > n_pipelines):
+ print('\nToo many cores, truncating N_CORES to N_PIPELINES')
+ n_cores = n_pipelines
+ fileTrace.ncores_truncated = True
+
+ fileTrace.n_pipelines = n_pipelines
+ fileTrace.n_cores = n_cores
+
+ strTruncated = ("", "(Truncated)")[fileTrace.ncores_truncated]
+ print("N_PIPELINES = {}, N_CORES = {} {}"
+ .format(n_pipelines, n_cores, strTruncated))
+ print("---------------------------------------------------------------")
+
+ ctx0_inst = Context0()
+ ctx1_inst = Context1()
+ ctx2_inst = Context2()
+
+ # initialize the class variables
+ ctx1_inst._fileTrace = fileTrace
+ ctx2_inst._fileTrace = fileTrace
+
+ ctx0_inst.stage0_init(n_cores, n_pipelines, ctx1_inst, ctx2_inst)
+ ctx0_inst.stage0_process()
+
+
+def validate_core(core):
+ match = reg_phycore.match(core)
+ if(match):
+ return True
+ else:
+ return False
+
+
+def validate_phycores(phy_cores):
+ '''validate physical cores, check if unique.'''
+ # eat up whitespaces
+ phy_cores = phy_cores.strip().split(',')
+
+ # check if the core list is unique
+ if(len(phy_cores) != len(set(phy_cores))):
+ print('list of physical cores has duplicates')
+ return None
+
+ for core in phy_cores:
+ if not validate_core(core):
+ print('invalid physical core specified.')
+ return None
+ return phy_cores
+
+
+def scanconfigfile(fileTrace):
+ '''scan input file for pipelines, validate then process.'''
+ # open file
+ filetoscan = open(fileTrace.in_file_namepath, 'r')
+ fileTrace.in_buf = filetoscan.read()
+
+ # reset iterator on open file
+ filetoscan.seek(0)
+
+ # scan input file for pipelines
+ # master pipelines to be ignored
+ pattern_pipeline = r'\[PIPELINE\d*\]'
+ pattern_mastertype = r'type\s*=\s*MASTER'
+
+ pending_pipeline = False
+ for line in filetoscan:
+ match_pipeline = re.search(pattern_pipeline, line)
+ match_type = re.search('type\s*=', line)
+ match_mastertype = re.search(pattern_mastertype, line)
+
+ if(match_pipeline):
+ sPipeline = line[match_pipeline.start():match_pipeline.end()]
+ pending_pipeline = True
+ elif(match_type):
+ # found a type definition...
+ if(match_mastertype is None):
+ # and this is not a master pipeline...
+ if(pending_pipeline):
+ # add it to the list of pipelines to be mapped
+ fileTrace.arr_pipelines.append(sPipeline)
+ pending_pipeline = False
+ else:
+ # and this is a master pipeline...
+ # ignore the current and move on to next
+ sPipeline = ""
+ pending_pipeline = False
+ filetoscan.close()
+
+ # validate if pipelines are unique
+ if(len(fileTrace.arr_pipelines) != len(set(fileTrace.arr_pipelines))):
+ sys.exit('Error: duplicate pipelines in input file')
+
+ num_pipelines = len(fileTrace.arr_pipelines)
+ num_cores = len(fileTrace.in_physical_cores)
+
+ print("-------------------Pipeline-to-core mapping--------------------")
+ print("Input pipelines = {}\nInput cores = {}"
+ .format(fileTrace.arr_pipelines, fileTrace.in_physical_cores))
+
+    # input configuration file validations go here
+ if (num_cores > fileTrace.max_cores):
+ sys.exit('Error: number of cores specified > max_cores (%d)' %
+ fileTrace.max_cores)
+
+ if (num_pipelines > fileTrace.max_pipelines):
+ sys.exit('Error: number of pipelines in input \
+ cfg file > max_pipelines (%d)' % fileTrace.max_pipelines)
+
+ # call process to generate pipeline-to-core mapping, trace and log
+ process(num_cores, num_pipelines, fileTrace)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='mappipelines')
+
+ reqNamedGrp = parser.add_argument_group('required named args')
+ reqNamedGrp.add_argument(
+ '-i',
+ '--input-file',
+ type=argparse.FileType('r'),
+ help='Input config file',
+ required=True)
+
+ reqNamedGrp.add_argument(
+ '-pc',
+ '--physical-cores',
+ type=validate_phycores,
+ help='''Enter available CPU cores in
+ format:\"<core>,<core>,...\"
+ where each core format: \"s<SOCKETID>c<COREID>\"
+ where SOCKETID={0..9}, COREID={1-99}''',
+ required=True)
+
+ # add optional arguments
+ parser.add_argument(
+ '-ht',
+ '--hyper-thread',
+ help='enable/disable hyper threading. default is ON',
+ default='ON',
+ choices=['ON', 'OFF'])
+
+ parser.add_argument(
+ '-nO',
+ '--no-output-file',
+ help='''disable output config file generation.
+ Output file generation is enabled by default''',
+ action="store_true")
+
+ args = parser.parse_args()
+
+ if(args.physical_cores is None):
+ parser.error("invalid physical_cores specified")
+
+ # create object of FileTrace and initialise
+ fileTrace = FileTrace(args.input_file.name)
+ fileTrace.in_physical_cores = args.physical_cores
+ fileTrace.hyper_thread = args.hyper_thread
+
+ if(fileTrace.hyper_thread == 'OFF'):
+ print("!!!!disabling stage2 HT!!!!")
+ enable_stage2_traceout = 0
+ enable_stage2_fileout = 0
+ elif(fileTrace.hyper_thread == 'ON'):
+ print("!!!!HT enabled. disabling stage1 file generation.!!!!")
+ enable_stage1_fileout = 0
+
+ if(args.no_output_file is True):
+ print("!!!!disabling stage1 and stage2 fileout!!!!")
+ enable_stage1_fileout = 0
+ enable_stage2_fileout = 0
+
+ scanconfigfile(fileTrace)
diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c
index fd9ff495..af1b6284 100644
--- a/examples/ip_pipeline/config_check.c
+++ b/examples/ip_pipeline/config_check.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,6 +56,26 @@ check_mempools(struct app_params *app)
}
}
+static inline uint32_t
+link_rxq_used(struct app_link_params *link, uint32_t q_id)
+{
+ uint32_t i;
+
+ if ((link->arp_q == q_id) ||
+ (link->tcp_syn_q == q_id) ||
+ (link->ip_local_q == q_id) ||
+ (link->tcp_local_q == q_id) ||
+ (link->udp_local_q == q_id) ||
+ (link->sctp_local_q == q_id))
+ return 1;
+
+ for (i = 0; i < link->n_rss_qs; i++)
+ if (link->rss_qs[i] == q_id)
+ return 1;
+
+ return 0;
+}
+
static void
check_links(struct app_params *app)
{
@@ -90,14 +110,12 @@ check_links(struct app_params *app)
rxq_max = link->udp_local_q;
if (link->sctp_local_q > rxq_max)
rxq_max = link->sctp_local_q;
+ for (i = 0; i < link->n_rss_qs; i++)
+ if (link->rss_qs[i] > rxq_max)
+ rxq_max = link->rss_qs[i];
for (i = 1; i <= rxq_max; i++)
- APP_CHECK(((link->arp_q == i) ||
- (link->tcp_syn_q == i) ||
- (link->ip_local_q == i) ||
- (link->tcp_local_q == i) ||
- (link->udp_local_q == i) ||
- (link->sctp_local_q == i)),
+ APP_CHECK((link_rxq_used(link, i)),
"%s RXQs are not contiguous (A)\n", link->name);
n_rxq = app_link_get_n_rxq(app, link);
@@ -118,7 +136,7 @@ check_links(struct app_params *app)
"%s RXQs are not contiguous (C)\n", link->name);
}
- /* Check that link RXQs are contiguous */
+ /* Check that link TXQs are contiguous */
n_txq = app_link_get_n_txq(app, link);
APP_CHECK((n_txq), "%s does not have any TXQ\n", link->name);
@@ -298,6 +316,29 @@ check_tms(struct app_params *app)
}
static void
+check_knis(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_kni; i++) {
+ struct app_pktq_kni_params *p = &app->kni_params[i];
+ uint32_t n_readers = app_kni_get_readers(app, p);
+ uint32_t n_writers = app_kni_get_writers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
check_sources(struct app_params *app)
{
uint32_t i;
@@ -435,6 +476,7 @@ app_config_check(struct app_params *app)
check_txqs(app);
check_swqs(app);
check_tms(app);
+ check_knis(app);
check_sources(app);
check_sinks(app);
check_msgqs(app);
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index e5efd03e..0adca98f 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -1,4 +1,4 @@
-/*-
+/*-
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
@@ -30,6 +30,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
@@ -80,6 +81,11 @@ static const struct app_link_params link_params_default = {
.tcp_local_q = 0,
.udp_local_q = 0,
.sctp_local_q = 0,
+ .rss_qs = {0},
+ .n_rss_qs = 0,
+ .rss_proto_ipv4 = ETH_RSS_IPV4,
+ .rss_proto_ipv6 = ETH_RSS_IPV6,
+ .rss_proto_l2 = 0,
.state = 0,
.ip = 0,
.depth = 0,
@@ -103,6 +109,13 @@ static const struct app_link_params link_params_default = {
.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
.split_hdr_size = 0, /* Header split buffer size */
},
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
@@ -176,6 +189,20 @@ struct app_pktq_tm_params default_tm_params = {
.burst_write = 32,
};
+struct app_pktq_kni_params default_kni_params = {
+ .parsed = 0,
+ .socket_id = 0,
+ .core_id = 0,
+ .hyper_th_id = 0,
+ .force_bind = 0,
+
+ .mempool_id = 0,
+ .burst_read = 32,
+ .burst_write = 32,
+ .dropless = 0,
+ .n_retries = 0,
+};
+
struct app_pktq_source_params default_source_params = {
.parsed = 0,
.mempool_id = 0,
@@ -229,306 +256,136 @@ app_print_usage(char *prgname)
rte_exit(0, app_usage, prgname, app_params_default.config_file);
}
-#define skip_white_spaces(pos) \
-({ \
- __typeof__(pos) _p = (pos); \
- for ( ; isspace(*_p); _p++); \
- _p; \
+#define APP_PARAM_ADD(set, key) \
+({ \
+ ssize_t pos = APP_PARAM_FIND(set, key); \
+ ssize_t size = RTE_DIM(set); \
+ \
+ if (pos < 0) { \
+ for (pos = 0; pos < size; pos++) { \
+ if (!APP_PARAM_VALID(&((set)[pos]))) \
+ break; \
+ } \
+ \
+ APP_CHECK((pos < size), \
+ "Parse error: size of %s is limited to %u elements",\
+ #set, (uint32_t) size); \
+ \
+ (set)[pos].name = strdup(key); \
+ APP_CHECK(((set)[pos].name), \
+ "Parse error: no free memory"); \
+ } \
+ pos; \
})
-#define PARSER_PARAM_ADD_CHECK(result, params_array, section_name) \
-do { \
- APP_CHECK((result != -EINVAL), \
- "Parse error: no free memory"); \
- APP_CHECK((result != -ENOMEM), \
- "Parse error: too many \"%s\" sections", section_name); \
- APP_CHECK(((result >= 0) && (params_array)[result].parsed == 0),\
- "Parse error: duplicate \"%s\" section", section_name); \
- APP_CHECK((result >= 0), \
- "Parse error in section \"%s\"", section_name); \
-} while (0)
-
-int
-parser_read_arg_bool(const char *p)
-{
- p = skip_white_spaces(p);
- int result = -EINVAL;
-
- if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
- ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
- p += 3;
- result = 1;
- }
-
- if (((p[0] == 'o') && (p[1] == 'n')) ||
- ((p[0] == 'O') && (p[1] == 'N'))) {
- p += 2;
- result = 1;
- }
+#define APP_PARAM_ADD_LINK_FOR_RXQ(app, rxq_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id, queue_id; \
+ \
+ sscanf((rxq_name), "RXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- if (((p[0] == 'n') && (p[1] == 'o')) ||
- ((p[0] == 'N') && (p[1] == 'O'))) {
- p += 2;
- result = 0;
- }
+#define APP_PARAM_ADD_LINK_FOR_TXQ(app, txq_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id, queue_id; \
+ \
+ sscanf((txq_name), "TXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
- ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
- p += 3;
- result = 0;
- }
+#define APP_PARAM_ADD_LINK_FOR_TM(app, tm_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id; \
+ \
+ sscanf((tm_name), "TM%" SCNu32, &link_id); \
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- p = skip_white_spaces(p);
+#define APP_PARAM_ADD_LINK_FOR_KNI(app, kni_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id; \
+ \
+ sscanf((kni_name), "KNI%" SCNu32, &link_id); \
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- if (p[0] != '\0')
- return -EINVAL;
+#define PARSE_CHECK_DUPLICATE_SECTION(obj) \
+do { \
+ APP_CHECK(((obj)->parsed == 0), \
+ "Parse error: duplicate \"%s\" section", (obj)->name); \
+ (obj)->parsed++; \
+} while (0)
- return result;
-}
+#define PARSE_CHECK_DUPLICATE_SECTION_EAL(obj) \
+do { \
+ APP_CHECK(((obj)->parsed == 0), \
+ "Parse error: duplicate \"%s\" section", "EAL"); \
+ (obj)->parsed++; \
+} while (0)
#define PARSE_ERROR(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"\n", section, entry)
+APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"", section, entry)
#define PARSE_ERROR_MESSAGE(exp, section, entry, message) \
-APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s\n", \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s", \
section, entry, message)
+#define PARSE_ERROR_NO_ELEMENTS(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "no elements detected", \
+ section, entry)
+
+#define PARSE_ERROR_TOO_MANY_ELEMENTS(exp, section, entry, max) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "maximum number of elements allowed is %u", \
+ section, entry, max)
+
+#define PARSE_ERROR_INVALID_ELEMENT(exp, section, entry, value) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "Invalid element value \"%s\"", \
+ section, entry, value)
#define PARSE_ERROR_MALLOC(exp) \
-APP_CHECK(exp, "Parse error: no free memory\n")
+APP_CHECK(exp, "Parse error: no free memory")
#define PARSE_ERROR_SECTION(exp, section) \
APP_CHECK(exp, "Parse error in section \"%s\"", section)
#define PARSE_ERROR_SECTION_NO_ENTRIES(exp, section) \
-APP_CHECK(exp, "Parse error in section \"%s\": no entries\n", section)
+APP_CHECK(exp, "Parse error in section \"%s\": no entries", section)
#define PARSE_WARNING_IGNORED(exp, section, entry) \
do \
if (!(exp)) \
fprintf(stderr, "Parse warning in section \"%s\": " \
- "entry \"%s\" is ignored\n", section, entry); \
+ "entry \"%s\" is ignored", section, entry); \
while (0)
#define PARSE_ERROR_INVALID(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"\n",\
+APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"",\
section, entry)
#define PARSE_ERROR_DUPLICATE(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"\n",\
+APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"", \
section, entry)
-int
-parser_read_uint64(uint64_t *value, const char *p)
-{
- char *next;
- uint64_t val;
-
- p = skip_white_spaces(p);
- if (!isdigit(*p))
- return -EINVAL;
-
- val = strtoul(p, &next, 10);
- if (p == next)
- return -EINVAL;
-
- p = next;
- switch (*p) {
- case 'T':
- val *= 1024ULL;
- /* fall through */
- case 'G':
- val *= 1024ULL;
- /* fall through */
- case 'M':
- val *= 1024ULL;
- /* fall through */
- case 'k':
- case 'K':
- val *= 1024ULL;
- p++;
- break;
- }
-
- p = skip_white_spaces(p);
- if (*p != '\0')
- return -EINVAL;
-
- *value = val;
- return 0;
-}
-
-int
-parser_read_uint32(uint32_t *value, const char *p)
-{
- uint64_t val = 0;
- int ret = parser_read_uint64(&val, p);
-
- if (ret < 0)
- return ret;
-
- if (val > UINT32_MAX)
- return -ERANGE;
-
- *value = val;
- return 0;
-}
-
-int
-parse_pipeline_core(uint32_t *socket,
- uint32_t *core,
- uint32_t *ht,
- const char *entry)
-{
- size_t num_len;
- char num[8];
-
- uint32_t s = 0, c = 0, h = 0, val;
- uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
- const char *next = skip_white_spaces(entry);
- char type;
-
- /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
- while (*next != '\0') {
- /* If everything parsed nothing should left */
- if (s_parsed && c_parsed && h_parsed)
- return -EINVAL;
-
- type = *next;
- switch (type) {
- case 's':
- case 'S':
- if (s_parsed || c_parsed || h_parsed)
- return -EINVAL;
- s_parsed = 1;
- next++;
- break;
- case 'c':
- case 'C':
- if (c_parsed || h_parsed)
- return -EINVAL;
- c_parsed = 1;
- next++;
- break;
- case 'h':
- case 'H':
- if (h_parsed)
- return -EINVAL;
- h_parsed = 1;
- next++;
- break;
- default:
- /* If it start from digit it must be only core id. */
- if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
- return -EINVAL;
-
- type = 'C';
- }
-
- for (num_len = 0; *next != '\0'; next++, num_len++) {
- if (num_len == RTE_DIM(num))
- return -EINVAL;
-
- if (!isdigit(*next))
- break;
-
- num[num_len] = *next;
- }
-
- if (num_len == 0 && type != 'h' && type != 'H')
- return -EINVAL;
-
- if (num_len != 0 && (type == 'h' || type == 'H'))
- return -EINVAL;
-
- num[num_len] = '\0';
- val = strtol(num, NULL, 10);
-
- h = 0;
- switch (type) {
- case 's':
- case 'S':
- s = val;
- break;
- case 'c':
- case 'C':
- c = val;
- break;
- case 'h':
- case 'H':
- h = 1;
- break;
- }
- }
-
- *socket = s;
- *core = c;
- *ht = h;
- return 0;
-}
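parse_pipeline_core() moves to parser.c together with the other generic readers; it turns core placement strings such as "2", "s0c1" or "s0c1h" into a (socket, core, hyper-thread) triple. The standalone sketch below accepts only those common spellings (the real parser also handles upper case and partial forms) and uses sscanf() with a %n length check instead of the character-by-character scan:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Accepts a bare core id, "s<SOCKET>c<CORE>" or "s<SOCKET>c<CORE>h". */
static int
sketch_parse_core(const char *s, uint32_t *socket, uint32_t *core, uint32_t *ht)
{
	unsigned sk = 0, c = 0;
	int n;

	n = -1;
	if ((sscanf(s, "s%uc%uh%n", &sk, &c, &n) == 2) &&
		(n == (int)strlen(s))) {
		*socket = sk; *core = c; *ht = 1;
		return 0;
	}

	n = -1;
	if ((sscanf(s, "s%uc%u%n", &sk, &c, &n) == 2) &&
		(n == (int)strlen(s))) {
		*socket = sk; *core = c; *ht = 0;
		return 0;
	}

	n = -1;
	if ((sscanf(s, "%u%n", &c, &n) == 1) && (n == (int)strlen(s))) {
		*socket = 0; *core = c; *ht = 0;
		return 0;
	}

	return -1;
}

int
main(void)
{
	uint32_t s, c, h;

	if (sketch_parse_core("s0c2h", &s, &c, &h) == 0)
		printf("socket %u, core %u, hyperthread %u\n",
			(unsigned)s, (unsigned)c, (unsigned)h);
	return 0;
}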
-
-static uint32_t
-get_hex_val(char c)
-{
- switch (c) {
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9':
- return c - '0';
- case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
- return c - 'A' + 10;
- case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
- return c - 'a' + 10;
- default:
- return 0;
- }
-}
-
-int
-parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
-{
- char *c;
- uint32_t len, i;
-
- /* Check input parameters */
- if ((src == NULL) ||
- (dst == NULL) ||
- (size == NULL) ||
- (*size == 0))
- return -1;
-
- len = strlen(src);
- if (((len & 3) != 0) ||
- (len > (*size) * 2))
- return -1;
- *size = len / 2;
-
- for (c = src; *c != 0; c++) {
- if ((((*c) >= '0') && ((*c) <= '9')) ||
- (((*c) >= 'A') && ((*c) <= 'F')) ||
- (((*c) >= 'a') && ((*c) <= 'f')))
- continue;
-
- return -1;
- }
-
- /* Convert chars to bytes */
- for (i = 0; i < *size; i++)
- dst[i] = get_hex_val(src[2 * i]) * 16 +
- get_hex_val(src[2 * i + 1]);
-
- return 0;
-}
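parse_hex_string() (also relocated to parser.c) converts hex text such as firewall keys and masks into a byte array, with *size acting as buffer capacity on input and decoded length on output. A compact standalone equivalent, using sscanf("%2hhx") instead of the get_hex_val() helper and requiring only an even number of digits:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* "0A0B0C0D" -> {0x0a, 0x0b, 0x0c, 0x0d}. */
static int
sketch_hex_decode(const char *src, uint8_t *dst, uint32_t *size)
{
	uint32_t len = strlen(src), i;

	if ((len == 0) || (len & 1) || (len / 2 > *size))
		return -1;

	for (i = 0; i < len / 2; i++)
		if (sscanf(src + 2 * i, "%2hhx", &dst[i]) != 1)
			return -1;

	*size = len / 2;
	return 0;
}

int
main(void)
{
	uint8_t key[16];
	uint32_t size = sizeof(key);

	if (sketch_hex_decode("0A0B0C0D", key, &size) == 0)
		printf("%u bytes, key[0] = 0x%02x\n", (unsigned)size, key[0]);
	return 0;
}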
-
-static size_t
-skip_digits(const char *src)
-{
- size_t i;
-
- for (i = 0; isdigit(src[i]); i++);
-
- return i;
-}
-
static int
validate_name(const char *name, const char *prefix, int num)
{
@@ -584,6 +441,8 @@ parse_eal(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+ PARSE_CHECK_DUPLICATE_SECTION_EAL(p);
+
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *entry = &entries[i];
@@ -963,413 +822,162 @@ parse_eal(struct app_params *app,
free(entries);
}
-static int
-parse_pipeline_pcap_source(struct app_params *app,
- struct app_pipeline_params *p,
- const char *file_name, const char *cp_size)
-{
- const char *next = NULL;
- char *end;
- uint32_t i;
- int parse_file = 0;
-
- if (file_name && !cp_size) {
- next = file_name;
- parse_file = 1; /* parse file path */
- } else if (cp_size && !file_name) {
- next = cp_size;
- parse_file = 0; /* parse copy size */
- } else
- return -EINVAL;
-
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
-
- if (p->n_pktq_in == 0)
- return -EINVAL;
-
- i = 0;
- while (*next != '\0') {
- uint32_t id;
-
- if (i >= p->n_pktq_in)
- return -EINVAL;
-
- id = p->pktq_in[i].id;
-
- end = strchr(next, ' ');
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
-
- if (parse_file) {
- app->source_params[id].file_name = strdup(name);
- if (app->source_params[id].file_name == NULL)
- return -ENOMEM;
- } else {
- if (parser_read_uint32(
- &app->source_params[id].n_bytes_per_pkt,
- name) != 0) {
- if (app->source_params[id].
- file_name != NULL)
- free(app->source_params[id].
- file_name);
- return -EINVAL;
- }
- }
-
- i++;
-
- if (i == p->n_pktq_in)
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int
-parse_pipeline_pcap_sink(struct app_params *app,
- struct app_pipeline_params *p,
- const char *file_name, const char *n_pkts_to_dump)
-{
- const char *next = NULL;
- char *end;
- uint32_t i;
- int parse_file = 0;
-
- if (file_name && !n_pkts_to_dump) {
- next = file_name;
- parse_file = 1; /* parse file path */
- } else if (n_pkts_to_dump && !file_name) {
- next = n_pkts_to_dump;
- parse_file = 0; /* parse copy size */
- } else
- return -EINVAL;
-
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
-
- if (p->n_pktq_out == 0)
- return -EINVAL;
-
- i = 0;
- while (*next != '\0') {
- uint32_t id;
-
- if (i >= p->n_pktq_out)
- return -EINVAL;
-
- id = p->pktq_out[i].id;
-
- end = strchr(next, ' ');
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
-
- if (parse_file) {
- app->sink_params[id].file_name = strdup(name);
- if (app->sink_params[id].file_name == NULL)
- return -ENOMEM;
- } else {
- if (parser_read_uint32(
- &app->sink_params[id].n_pkts_to_dump,
- name) != 0) {
- if (app->sink_params[id].file_name !=
- NULL)
- free(app->sink_params[id].
- file_name);
- return -EINVAL;
- }
- }
-
- i++;
-
- if (i == p->n_pktq_out)
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int
+static void
parse_pipeline_pktq_in(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
+ p->n_pktq_in = 0;
- while (*next != '\0') {
+ while (1) {
enum app_pktq_in_type type;
int id;
- char *end_space;
- char *end_tab;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- next = skip_white_spaces(next);
- if (!next)
+ if (name == NULL)
break;
- end_space = strchr(next, ' ');
- end_tab = strchr(next, '\t');
-
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
-
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_pktq_in < RTE_DIM(p->pktq_in)),
+ p->name, "pktq_in", (uint32_t)RTE_DIM(p->pktq_in));
if (validate_name(name, "RXQ", 2) == 0) {
type = APP_PKTQ_IN_HWQ;
id = APP_PARAM_ADD(app->hwq_in_params, name);
+ APP_PARAM_ADD_LINK_FOR_RXQ(app, name);
} else if (validate_name(name, "SWQ", 1) == 0) {
type = APP_PKTQ_IN_SWQ;
id = APP_PARAM_ADD(app->swq_params, name);
} else if (validate_name(name, "TM", 1) == 0) {
type = APP_PKTQ_IN_TM;
id = APP_PARAM_ADD(app->tm_params, name);
+ APP_PARAM_ADD_LINK_FOR_TM(app, name);
+ } else if (validate_name(name, "KNI", 1) == 0) {
+ type = APP_PKTQ_IN_KNI;
+ id = APP_PARAM_ADD(app->kni_params, name);
+ APP_PARAM_ADD_LINK_FOR_KNI(app, name);
} else if (validate_name(name, "SOURCE", 1) == 0) {
type = APP_PKTQ_IN_SOURCE;
id = APP_PARAM_ADD(app->source_params, name);
} else
- return -EINVAL;
-
- if (id < 0)
- return id;
+ PARSE_ERROR_INVALID_ELEMENT(0,
+ p->name, "pktq_in", name);
p->pktq_in[p->n_pktq_in].type = type;
p->pktq_in[p->n_pktq_in].id = (uint32_t) id;
p->n_pktq_in++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_pktq_in > 0), p->name, "pktq_in");
}
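The rewritten pktq_in/pktq_out parsers drop the hand-rolled space/tab scanning in favor of a strtok_r() loop that reuses value as both the input string and the save pointer. PARSE_DELIMITER comes from parser.h; the sketch below assumes it is the usual whitespace set:

#include <stdio.h>
#include <string.h>

#define PARSE_DELIMITER " \f\n\r\t\v"	/* assumed, see parser.h */

int
main(void)
{
	/* A typical "pktq_in = ..." entry value; the buffer must be writable
	 * because strtok_r() inserts '\0' terminators in place. */
	char value[] = "RXQ0.0 SWQ3 SOURCE1";
	char *p = value;

	while (1) {
		char *name = strtok_r(p, PARSE_DELIMITER, &p);

		if (name == NULL)
			break;
		printf("element: %s\n", name);	/* RXQ0.0, SWQ3, SOURCE1 */
	}
	return 0;
}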
-static int
+static void
parse_pipeline_pktq_out(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
+ p->n_pktq_out = 0;
- while (*next != '\0') {
+ while (1) {
enum app_pktq_out_type type;
int id;
- char *end_space;
- char *end_tab;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- next = skip_white_spaces(next);
- if (!next)
+ if (name == NULL)
break;
- end_space = strchr(next, ' ');
- end_tab = strchr(next, '\t');
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_pktq_out < RTE_DIM(p->pktq_out)),
+ p->name, "pktq_out", (uint32_t)RTE_DIM(p->pktq_out));
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
-
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
if (validate_name(name, "TXQ", 2) == 0) {
type = APP_PKTQ_OUT_HWQ;
id = APP_PARAM_ADD(app->hwq_out_params, name);
+ APP_PARAM_ADD_LINK_FOR_TXQ(app, name);
} else if (validate_name(name, "SWQ", 1) == 0) {
type = APP_PKTQ_OUT_SWQ;
id = APP_PARAM_ADD(app->swq_params, name);
} else if (validate_name(name, "TM", 1) == 0) {
type = APP_PKTQ_OUT_TM;
id = APP_PARAM_ADD(app->tm_params, name);
+ APP_PARAM_ADD_LINK_FOR_TM(app, name);
+ } else if (validate_name(name, "KNI", 1) == 0) {
+ type = APP_PKTQ_OUT_KNI;
+ id = APP_PARAM_ADD(app->kni_params, name);
+ APP_PARAM_ADD_LINK_FOR_KNI(app, name);
} else if (validate_name(name, "SINK", 1) == 0) {
type = APP_PKTQ_OUT_SINK;
id = APP_PARAM_ADD(app->sink_params, name);
} else
- return -EINVAL;
-
- if (id < 0)
- return id;
+ PARSE_ERROR_INVALID_ELEMENT(0,
+ p->name, "pktq_out", name);
p->pktq_out[p->n_pktq_out].type = type;
p->pktq_out[p->n_pktq_out].id = id;
p->n_pktq_out++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_pktq_out > 0), p->name, "pktq_out");
}
-static int
+static void
parse_pipeline_msgq_in(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
- ssize_t idx;
-
- while (*next != '\0') {
- char *end_space;
- char *end_tab;
-
- next = skip_white_spaces(next);
- if (!next)
- break;
-
- end_space = strchr(next, ' ');
- end_tab = strchr(next, '\t');
+ p->n_msgq_in = 0;
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
+ while (1) {
+ int idx;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
+ if (name == NULL)
+ break;
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_msgq_in < RTE_DIM(p->msgq_in)),
+ p->name, "msgq_in", (uint32_t)(RTE_DIM(p->msgq_in)));
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
-
- if (validate_name(name, "MSGQ", 1) != 0)
- return -EINVAL;
+ PARSE_ERROR_INVALID_ELEMENT(
+ (validate_name(name, "MSGQ", 1) == 0),
+ p->name, "msgq_in", name);
idx = APP_PARAM_ADD(app->msgq_params, name);
- if (idx < 0)
- return idx;
-
p->msgq_in[p->n_msgq_in] = idx;
p->n_msgq_in++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_msgq_in > 0), p->name, "msgq_in");
}
-static int
+static void
parse_pipeline_msgq_out(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
- ssize_t idx;
-
- while (*next != '\0') {
- char *end_space;
- char *end_tab;
-
- next = skip_white_spaces(next);
- if (!next)
- break;
-
- end_space = strchr(next, ' ');
- end_tab = strchr(next, '\t');
-
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
+ p->n_msgq_out = 0;
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
+ while (1) {
+ int idx;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
+ if (name == NULL)
+ break;
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_msgq_out < RTE_DIM(p->msgq_out)),
+ p->name, "msgq_out", (uint32_t)RTE_DIM(p->msgq_out));
- if (validate_name(name, "MSGQ", 1) != 0)
- return -EINVAL;
+ PARSE_ERROR_INVALID_ELEMENT(
+ (validate_name(name, "MSGQ", 1) == 0),
+ p->name, "msgq_out", name);
idx = APP_PARAM_ADD(app->msgq_params, name);
- if (idx < 0)
- return idx;
-
p->msgq_out[p->n_msgq_out] = idx;
p->n_msgq_out++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_msgq_out > 0), p->name, "msgq_out");
}
static void
@@ -1392,9 +1000,8 @@ parse_pipeline(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->pipeline_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->pipeline_params, section_name);
-
param = &app->pipeline_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1421,38 +1028,26 @@ parse_pipeline(struct app_params *app,
}
if (strcmp(ent->name, "pktq_in") == 0) {
- int status = parse_pipeline_pktq_in(app, param,
- ent->value);
+ parse_pipeline_pktq_in(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
if (strcmp(ent->name, "pktq_out") == 0) {
- int status = parse_pipeline_pktq_out(app, param,
- ent->value);
+ parse_pipeline_pktq_out(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
if (strcmp(ent->name, "msgq_in") == 0) {
- int status = parse_pipeline_msgq_in(app, param,
- ent->value);
+ parse_pipeline_msgq_in(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
if (strcmp(ent->name, "msgq_out") == 0) {
- int status = parse_pipeline_msgq_out(app, param,
- ent->value);
+ parse_pipeline_msgq_out(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
@@ -1466,66 +1061,6 @@ parse_pipeline(struct app_params *app,
continue;
}
- if (strcmp(ent->name, "pcap_file_rd") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_source(app,
- param, ent->value, NULL);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_source(app,
- param, NULL, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pcap_file_wr") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_sink(app, param,
- ent->value, NULL);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_sink(app, param,
- NULL, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
/* pipeline type specific items */
APP_CHECK((param->n_args < APP_MAX_PIPELINE_ARGS),
"Parse error in section \"%s\": too many "
@@ -1541,17 +1076,13 @@ parse_pipeline(struct app_params *app,
param->n_args++;
}
- param->parsed = 1;
-
snprintf(name, sizeof(name), "MSGQ-REQ-%s", section_name);
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
param->msgq_in[param->n_msgq_in++] = param_idx;
snprintf(name, sizeof(name), "MSGQ-RSP-%s", section_name);
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
param->msgq_out[param->n_msgq_out++] = param_idx;
@@ -1560,7 +1091,6 @@ parse_pipeline(struct app_params *app,
param->core_id,
(param->hyper_th_id) ? "h" : "");
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
snprintf(name, sizeof(name), "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
@@ -1568,7 +1098,6 @@ parse_pipeline(struct app_params *app,
param->core_id,
(param->hyper_th_id) ? "h" : "");
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
free(entries);
@@ -1593,9 +1122,8 @@ parse_mempool(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->mempool_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->mempool_params, section_name);
-
param = &app->mempool_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1640,11 +1168,152 @@ parse_mempool(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
+static int
+parse_link_rss_qs(struct app_link_params *p,
+ char *value)
+{
+ p->n_rss_qs = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (p->n_rss_qs == RTE_DIM(p->rss_qs))
+ return -ENOMEM;
+
+ if (parser_read_uint32(&p->rss_qs[p->n_rss_qs++], token))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+parse_link_rss_proto_ipv4(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "IP") == 0) {
+ mask |= ETH_RSS_IPV4;
+ continue;
+ }
+ if (strcmp(token, "FRAG") == 0) {
+ mask |= ETH_RSS_FRAG_IPV4;
+ continue;
+ }
+ if (strcmp(token, "TCP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_TCP;
+ continue;
+ }
+ if (strcmp(token, "UDP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_UDP;
+ continue;
+ }
+ if (strcmp(token, "SCTP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_SCTP;
+ continue;
+ }
+ if (strcmp(token, "OTHER") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_OTHER;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_ipv4 = mask;
+ return 0;
+}
+
+static int
+parse_link_rss_proto_ipv6(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "IP") == 0) {
+ mask |= ETH_RSS_IPV6;
+ continue;
+ }
+ if (strcmp(token, "FRAG") == 0) {
+ mask |= ETH_RSS_FRAG_IPV6;
+ continue;
+ }
+ if (strcmp(token, "TCP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_TCP;
+ continue;
+ }
+ if (strcmp(token, "UDP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_UDP;
+ continue;
+ }
+ if (strcmp(token, "SCTP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_SCTP;
+ continue;
+ }
+ if (strcmp(token, "OTHER") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_OTHER;
+ continue;
+ }
+ if (strcmp(token, "IP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_EX;
+ continue;
+ }
+ if (strcmp(token, "TCP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_TCP_EX;
+ continue;
+ }
+ if (strcmp(token, "UDP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_UDP_EX;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_ipv6 = mask;
+ return 0;
+}
+
+static int
+parse_link_rss_proto_l2(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "L2") == 0) {
+ mask |= ETH_RSS_L2_PAYLOAD;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_l2 = mask;
+ return 0;
+}
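The three rss_proto_* readers map configuration tokens onto ETH_RSS_* hash-field bits that are later fed into rss_conf.rss_hf. A table-driven standalone sketch of the same idea for the IPv4 case; the SKETCH_RSS_* values are illustrative stand-ins, not the real rte_ethdev.h constants:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_RSS_IPV4              (1ULL << 2)
#define SKETCH_RSS_FRAG_IPV4         (1ULL << 3)
#define SKETCH_RSS_NONFRAG_IPV4_TCP  (1ULL << 4)
#define SKETCH_RSS_NONFRAG_IPV4_UDP  (1ULL << 5)

static const struct {
	const char *token;
	uint64_t flag;
} ipv4_rss_tokens[] = {
	{ "IP",   SKETCH_RSS_IPV4 },
	{ "FRAG", SKETCH_RSS_FRAG_IPV4 },
	{ "TCP",  SKETCH_RSS_NONFRAG_IPV4_TCP },
	{ "UDP",  SKETCH_RSS_NONFRAG_IPV4_UDP },
};

static int
sketch_rss_proto_ipv4(char *value, uint64_t *mask)
{
	uint64_t m = 0;

	while (1) {
		size_t i, n = sizeof(ipv4_rss_tokens) / sizeof(ipv4_rss_tokens[0]);
		char *token = strtok_r(value, " \t", &value);

		if (token == NULL)
			break;

		for (i = 0; i < n; i++)
			if (strcmp(token, ipv4_rss_tokens[i].token) == 0) {
				m |= ipv4_rss_tokens[i].flag;
				break;
			}

		if (i == n)
			return -EINVAL;	/* unknown token */
	}

	*mask = m;
	return 0;
}

int
main(void)
{
	char value[] = "IP TCP UDP";
	uint64_t mask;

	if (sketch_rss_proto_ipv4(value, &mask) == 0)
		printf("rss_hf = 0x%llx\n", (unsigned long long)mask);
	return 0;
}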
+
static void
parse_link(struct app_params *app,
const char *section_name,
@@ -1653,6 +1322,10 @@ parse_link(struct app_params *app,
struct app_link_params *param;
struct rte_cfgfile_entry *entries;
int n_entries, i;
+ int rss_qs_present = 0;
+ int rss_proto_ipv4_present = 0;
+ int rss_proto_ipv6_present = 0;
+ int rss_proto_l2_present = 0;
int pci_bdf_present = 0;
ssize_t param_idx;
@@ -1665,9 +1338,8 @@ parse_link(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->link_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->link_params, section_name);
-
param = &app->link_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1707,7 +1379,6 @@ parse_link(struct app_params *app,
continue;
}
-
if (strcmp(ent->name, "tcp_local_q") == 0) {
int status = parser_read_uint32(
&param->tcp_local_q, ent->value);
@@ -1735,6 +1406,44 @@ parse_link(struct app_params *app,
continue;
}
+ if (strcmp(ent->name, "rss_qs") == 0) {
+ int status = parse_link_rss_qs(param, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ rss_qs_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_ipv4") == 0) {
+ int status =
+ parse_link_rss_proto_ipv4(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_ipv4_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_ipv6") == 0) {
+ int status =
+ parse_link_rss_proto_ipv6(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_ipv6_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_l2") == 0) {
+ int status = parse_link_rss_proto_l2(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_l2_present = 1;
+ continue;
+ }
+
if (strcmp(ent->name, "pci_bdf") == 0) {
PARSE_ERROR_DUPLICATE((pci_bdf_present == 0),
section_name, ent->name);
@@ -1760,7 +1469,28 @@ parse_link(struct app_params *app,
"this entry is mandatory (port_mask is not "
"provided)");
- param->parsed = 1;
+ if (rss_proto_ipv4_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_ipv4",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_ipv6_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_ipv6",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_l2_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_l2",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_ipv4_present |
+ rss_proto_ipv6_present |
+ rss_proto_l2_present){
+ if (rss_proto_ipv4_present == 0)
+ param->rss_proto_ipv4 = 0;
+ if (rss_proto_ipv6_present == 0)
+ param->rss_proto_ipv6 = 0;
+ if (rss_proto_l2_present == 0)
+ param->rss_proto_l2 = 0;
+ }
free(entries);
}
@@ -1784,9 +1514,10 @@ parse_rxq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->hwq_in_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_in_params, section_name);
-
param = &app->hwq_in_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_RXQ(app, section_name);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1798,10 +1529,8 @@ parse_rxq(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_id = idx;
continue;
}
@@ -1828,8 +1557,6 @@ parse_rxq(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -1852,9 +1579,10 @@ parse_txq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->hwq_out_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_out_params, section_name);
-
param = &app->hwq_out_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_TXQ(app, section_name);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1887,12 +1615,19 @@ parse_txq(struct app_params *app,
continue;
}
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
/* unrecognized */
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -1920,9 +1655,8 @@ parse_swq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->swq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->swq_params, section_name);
-
param = &app->swq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2050,11 +1784,9 @@ parse_swq(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_direct_id = idx;
+
mempool_direct_present = 1;
continue;
}
@@ -2066,11 +1798,10 @@ parse_swq(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_indirect_id = idx;
+
mempool_indirect_present = 1;
continue;
}
@@ -2079,32 +1810,30 @@ parse_swq(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- APP_CHECK(((mtu_present) &&
+ APP_CHECK(((mtu_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"mtu\" is not allowed",
section_name);
- APP_CHECK(((metadata_size_present) &&
+ APP_CHECK(((metadata_size_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"metadata_size\" is "
"not allowed", section_name);
- APP_CHECK(((mempool_direct_present) &&
+ APP_CHECK(((mempool_direct_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"mempool_direct\" is "
"not allowed", section_name);
- APP_CHECK(((mempool_indirect_present) &&
+ APP_CHECK(((mempool_indirect_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"mempool_indirect\" is "
"not allowed", section_name);
- param->parsed = 1;
-
free(entries);
}
@@ -2127,9 +1856,10 @@ parse_tm(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->tm_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->tm_params, section_name);
-
param = &app->tm_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_TM(app, section_name);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2162,7 +1892,101 @@ parse_tm(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
+ free(entries);
+}
+
+static void
+parse_kni(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_kni_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->kni_params, section_name);
+ param = &app->kni_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_KNI(app, section_name);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "core") == 0) {
+ int status = parse_pipeline_core(
+ &param->socket_id,
+ &param->core_id,
+ &param->hyper_th_id,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ param->force_bind = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(&param->burst_read,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(&param->burst_write,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
free(entries);
}
@@ -2188,9 +2012,8 @@ parse_source(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->source_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->source_params, section_name);
-
param = &app->source_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2202,10 +2025,8 @@ parse_source(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_id = idx;
continue;
}
@@ -2219,7 +2040,7 @@ parse_source(struct app_params *app,
continue;
}
- if (strcmp(ent->name, "pcap_file_rd")) {
+ if (strcmp(ent->name, "pcap_file_rd") == 0) {
PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
section_name, ent->name);
@@ -2251,8 +2072,6 @@ parse_source(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2277,14 +2096,13 @@ parse_sink(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->sink_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->sink_params, section_name);
-
param = &app->sink_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
- if (strcmp(ent->name, "pcap_file_wr")) {
+ if (strcmp(ent->name, "pcap_file_wr") == 0) {
PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
section_name, ent->name);
@@ -2295,7 +2113,7 @@ parse_sink(struct app_params *app,
continue;
}
- if (strcmp(ent->name, "pcap_n_pkt_wr")) {
+ if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
int status;
PARSE_ERROR_DUPLICATE((pcap_n_pkt_present == 0),
@@ -2314,8 +2132,6 @@ parse_sink(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2338,9 +2154,8 @@ parse_msgq_req_pipeline(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
-
param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2358,7 +2173,6 @@ parse_msgq_req_pipeline(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
free(entries);
}
@@ -2381,9 +2195,8 @@ parse_msgq_rsp_pipeline(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
-
param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2401,8 +2214,6 @@ parse_msgq_rsp_pipeline(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2425,9 +2236,8 @@ parse_msgq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
-
param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2454,8 +2264,6 @@ parse_msgq(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2478,6 +2286,7 @@ static const struct config_section cfg_file_scheme[] = {
{"TXQ", 2, parse_txq},
{"SWQ", 1, parse_swq},
{"TM", 1, parse_tm},
+ {"KNI", 1, parse_kni},
{"SOURCE", 1, parse_source},
{"SINK", 1, parse_sink},
{"MSGQ-REQ-PIPELINE", 1, parse_msgq_req_pipeline},
@@ -2488,10 +2297,7 @@ static const struct config_section cfg_file_scheme[] = {
static void
create_implicit_mempools(struct app_params *app)
{
- ssize_t idx;
-
- idx = APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params, "start-up");
+ APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
}
static void
@@ -2510,7 +2316,6 @@ create_implicit_links_from_port_mask(struct app_params *app,
snprintf(name, sizeof(name), "LINK%" PRIu32, link_id);
idx = APP_PARAM_ADD(app->link_params, name);
- PARSER_PARAM_ADD_CHECK(idx, app->link_params, name);
app->link_params[idx].pmd_id = pmd_id;
link_id++;
@@ -2525,6 +2330,11 @@ assign_link_pmd_id_from_pci_bdf(struct app_params *app)
for (i = 0; i < app->n_links; i++) {
struct app_link_params *link = &app->link_params[i];
+ APP_CHECK((strlen(link->pci_bdf)),
+ "Parse error: %s pci_bdf is not configured "
+ "(port_mask is not provided)",
+ link->name);
+
link->pmd_id = i;
}
}
@@ -2615,28 +2425,12 @@ app_config_parse(struct app_params *app, const char *file_name)
APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out);
APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq);
APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm);
+ APP_PARAM_COUNT(app->kni_params, app->n_pktq_kni);
APP_PARAM_COUNT(app->source_params, app->n_pktq_source);
APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink);
APP_PARAM_COUNT(app->msgq_params, app->n_msgq);
APP_PARAM_COUNT(app->pipeline_params, app->n_pipelines);
-#ifdef RTE_PORT_PCAP
- for (i = 0; i < (int)app->n_pktq_source; i++) {
- struct app_pktq_source_params *p = &app->source_params[i];
-
- APP_CHECK((p->file_name), "Parse error: missing "
- "mandatory field \"pcap_file_rd\" for \"%s\"",
- p->name);
- }
-#else
- for (i = 0; i < (int)app->n_pktq_source; i++) {
- struct app_pktq_source_params *p = &app->source_params[i];
-
- APP_CHECK((!p->file_name), "Parse error: invalid field "
- "\"pcap_file_rd\" for \"%s\"", p->name);
- }
-#endif
-
if (app->port_mask == 0)
assign_link_pmd_id_from_pci_bdf(app);
@@ -2803,6 +2597,84 @@ save_links_params(struct app_params *app, FILE *f)
fprintf(f, "%s = %" PRIu32 "\n", "sctp_local_q",
p->sctp_local_q);
+ if (p->n_rss_qs) {
+ uint32_t j;
+
+ /* rss_qs */
+ fprintf(f, "rss_qs = ");
+ for (j = 0; j < p->n_rss_qs; j++)
+ fprintf(f, "%" PRIu32 " ", p->rss_qs[j]);
+ fputc('\n', f);
+
+ /* rss_proto_ipv4 */
+ if (p->rss_proto_ipv4) {
+ fprintf(f, "rss_proto_ipv4 = ");
+ if (p->rss_proto_ipv4 & ETH_RSS_IPV4)
+ fprintf(f, "IP ");
+ if (p->rss_proto_ipv4 & ETH_RSS_FRAG_IPV4)
+ fprintf(f, "FRAG ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_TCP)
+ fprintf(f, "TCP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_UDP)
+ fprintf(f, "UDP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_SCTP)
+ fprintf(f, "SCTP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+ fprintf(f, "OTHER ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+
+ /* rss_proto_ipv6 */
+ if (p->rss_proto_ipv6) {
+ fprintf(f, "rss_proto_ipv6 = ");
+ if (p->rss_proto_ipv6 & ETH_RSS_IPV6)
+ fprintf(f, "IP ");
+ if (p->rss_proto_ipv6 & ETH_RSS_FRAG_IPV6)
+ fprintf(f, "FRAG ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_TCP)
+ fprintf(f, "TCP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_UDP)
+ fprintf(f, "UDP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+ fprintf(f, "SCTP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_OTHER)
+ fprintf(f, "OTHER ");
+ if (p->rss_proto_ipv6 & ETH_RSS_IPV6_EX)
+ fprintf(f, "IP_EX ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_IPV6_TCP_EX)
+ fprintf(f, "TCP_EX ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_IPV6_UDP_EX)
+ fprintf(f, "UDP_EX ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+
+ /* rss_proto_l2 */
+ if (p->rss_proto_l2) {
+ fprintf(f, "rss_proto_l2 = ");
+ if (p->rss_proto_l2 & ETH_RSS_L2_PAYLOAD)
+ fprintf(f, "L2 ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_l2 = <NONE>\n");
+ } else {
+ fprintf(f, "; rss_qs = <NONE>\n");
+ fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+ fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+ fprintf(f, "; rss_proto_l2 = <NONE>\n");
+ }
+
if (strlen(p->pci_bdf))
fprintf(f, "%s = %s\n", "pci_bdf", p->pci_bdf);
@@ -2851,6 +2723,7 @@ save_txq_params(struct app_params *app, FILE *f)
fprintf(f, "%s = %s\n",
"dropless",
p->dropless ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
fputc('\n', f);
}
@@ -2916,6 +2789,53 @@ save_tm_params(struct app_params *app, FILE *f)
}
static void
+save_kni_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_kni_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->kni_params);
+ for (i = 0; i < count; i++) {
+ p = &app->kni_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ /* section name */
+ fprintf(f, "[%s]\n", p->name);
+
+ /* core */
+ if (p->force_bind) {
+ fprintf(f, "; force_bind = 1\n");
+ fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
+ p->socket_id,
+ p->core_id,
+ (p->hyper_th_id) ? "h" : "");
+ } else
+ fprintf(f, "; force_bind = 0\n");
+
+ /* mempool */
+ fprintf(f, "%s = %s\n", "mempool",
+ app->mempool_params[p->mempool_id].name);
+
+ /* burst_read */
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+
+ /* burst_write */
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+
+ /* dropless */
+ fprintf(f, "%s = %s\n",
+ "dropless",
+ p->dropless ? "yes" : "no");
+
+ /* n_retries */
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+
+ fputc('\n', f);
+ }
+}
+
+static void
save_source_params(struct app_params *app, FILE *f)
{
struct app_pktq_source_params *p;
@@ -3022,6 +2942,9 @@ save_pipeline_params(struct app_params *app, FILE *f)
case APP_PKTQ_IN_TM:
name = app->tm_params[pp->id].name;
break;
+ case APP_PKTQ_IN_KNI:
+ name = app->kni_params[pp->id].name;
+ break;
case APP_PKTQ_IN_SOURCE:
name = app->source_params[pp->id].name;
break;
@@ -3056,6 +2979,9 @@ save_pipeline_params(struct app_params *app, FILE *f)
case APP_PKTQ_OUT_TM:
name = app->tm_params[pp->id].name;
break;
+ case APP_PKTQ_OUT_KNI:
+ name = app->kni_params[pp->id].name;
+ break;
case APP_PKTQ_OUT_SINK:
name = app->sink_params[pp->id].name;
break;
@@ -3141,6 +3067,7 @@ app_config_save(struct app_params *app, const char *file_name)
save_txq_params(app, file);
save_swq_params(app, file);
save_tm_params(app, file);
+ save_kni_params(app, file);
save_source_params(app, file);
save_sink_params(app, file);
save_msgq_params(app, file);
@@ -3156,6 +3083,10 @@ app_config_init(struct app_params *app)
memcpy(app, &app_params_default, sizeof(struct app_params));
+ /* configure default_source_params */
+ default_source_params.file_name = strdup("./config/packets.pcap");
+ PARSE_ERROR_MALLOC(default_source_params.file_name != NULL);
+
for (i = 0; i < RTE_DIM(app->mempool_params); i++)
memcpy(&app->mempool_params[i],
&mempool_params_default,
@@ -3186,6 +3117,11 @@ app_config_init(struct app_params *app)
&default_tm_params,
sizeof(default_tm_params));
+ for (i = 0; i < RTE_DIM(app->kni_params); i++)
+ memcpy(&app->kni_params[i],
+ &default_kni_params,
+ sizeof(default_kni_params));
+
for (i = 0; i < RTE_DIM(app->source_params); i++)
memcpy(&app->source_params[i],
&default_source_params,
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index 83422e88..cd167f61 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -55,6 +55,8 @@
#define APP_NAME_SIZE 32
+#define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+
static void
app_init_core_map(struct app_params *app)
{
@@ -902,6 +904,67 @@ app_get_cpu_socket_id(uint32_t pmd_id)
return (status != SOCKET_ID_ANY) ? status : 0;
}
+static inline int
+app_link_rss_enabled(struct app_link_params *cp)
+{
+ return (cp->n_rss_qs) ? 1 : 0;
+}
+
+static void
+app_link_rss_setup(struct app_link_params *cp)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
+ uint32_t i;
+ int status;
+
+ /* Get RETA size */
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(cp->pmd_id, &dev_info);
+
+ if (dev_info.reta_size == 0)
+ rte_panic("%s (%u): RSS setup error (null RETA size)\n",
+ cp->name, cp->pmd_id);
+
+ if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
+ rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
+ cp->name, cp->pmd_id);
+
+ /* Setup RETA contents */
+ memset(reta_conf, 0, sizeof(reta_conf));
+
+ for (i = 0; i < dev_info.reta_size; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < dev_info.reta_size; i++) {
+ uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
+ uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+ uint32_t rss_qs_pos = i % cp->n_rss_qs;
+
+ reta_conf[reta_id].reta[reta_pos] =
+ (uint16_t) cp->rss_qs[rss_qs_pos];
+ }
+
+ /* RETA update */
+ status = rte_eth_dev_rss_reta_update(cp->pmd_id,
+ reta_conf,
+ dev_info.reta_size);
+ if (status != 0)
+ rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
+ cp->name, cp->pmd_id);
+}
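app_link_rss_setup() fills the redirection table round-robin from the configured rss_qs list: slot i gets rss_qs[i % n_rss_qs]. The arithmetic in isolation, without the rte_eth calls:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint16_t reta[128];			/* stand-in redirection table */
	uint32_t rss_qs[] = { 0, 1, 2, 3 };	/* queues from the rss_qs entry */
	uint32_t n_rss_qs = 4, i;

	for (i = 0; i < 128; i++)
		reta[i] = (uint16_t)rss_qs[i % n_rss_qs];

	for (i = 0; i < 6; i++)
		printf("reta[%u] = %u\n", (unsigned)i, (unsigned)reta[i]);
	/* prints 0 1 2 3 0 1: the queues cover the table round-robin */
	return 0;
}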
+
+static void
+app_init_link_set_config(struct app_link_params *p)
+{
+ if (p->n_rss_qs) {
+ p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
+ p->rss_proto_ipv6 |
+ p->rss_proto_l2;
+ }
+}
+
static void
app_init_link(struct app_params *app)
{
@@ -917,6 +980,7 @@ app_init_link(struct app_params *app)
sscanf(p_link->name, "LINK%" PRIu32, &link_id);
n_hwq_in = app_link_get_n_rxq(app, p_link);
n_hwq_out = app_link_get_n_txq(app, p_link);
+ app_init_link_set_config(p_link);
APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
@@ -1001,9 +1065,13 @@ app_init_link(struct app_params *app)
rte_panic("Cannot start %s (error %" PRId32 ")\n",
p_link->name, status);
- /* LINK UP */
+ /* LINK FILTERS */
app_link_set_arp_filter(app, p_link);
app_link_set_tcp_syn_filter(app, p_link);
+ if (app_link_rss_enabled(p_link))
+ app_link_rss_setup(p_link);
+
+ /* LINK UP */
app_link_up_internal(app, p_link);
}
@@ -1108,6 +1176,111 @@ app_init_tm(struct app_params *app)
}
}
+#ifdef RTE_LIBRTE_KNI
+static int
+kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
+ int ret = 0;
+
+ if (port_id >= rte_eth_dev_count())
+ return -EINVAL;
+
+ ret = (if_up) ?
+ rte_eth_dev_set_link_up(port_id) :
+ rte_eth_dev_set_link_down(port_id);
+
+ return ret;
+}
+
+static int
+kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
+ int ret;
+
+ if (port_id >= rte_eth_dev_count())
+ return -EINVAL;
+
+ if (new_mtu > ETHER_MAX_LEN)
+ return -EINVAL;
+
+ /* Set new MTU */
+ ret = rte_eth_dev_set_mtu(port_id, new_mtu);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#endif /* RTE_LIBRTE_KNI */
+
+#ifndef RTE_LIBRTE_KNI
+static void
+app_init_kni(struct app_params *app) {
+ if (app->n_pktq_kni == 0)
+ return;
+
+ rte_panic("Can not init KNI without librte_kni support.\n");
+}
+#else
+static void
+app_init_kni(struct app_params *app) {
+ uint32_t i;
+
+ if (app->n_pktq_kni == 0)
+ return;
+
+ rte_kni_init(app->n_pktq_kni);
+
+ for (i = 0; i < app->n_pktq_kni; i++) {
+ struct app_pktq_kni_params *p_kni = &app->kni_params[i];
+ struct app_link_params *p_link;
+ struct rte_eth_dev_info dev_info;
+ struct app_mempool_params *mempool_params;
+ struct rte_mempool *mempool;
+ struct rte_kni_conf conf;
+ struct rte_kni_ops ops;
+
+ /* LINK */
+ p_link = app_get_link_for_kni(app, p_kni);
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
+
+ /* MEMPOOL */
+ mempool_params = &app->mempool_params[p_kni->mempool_id];
+ mempool = app->mempool[p_kni->mempool_id];
+
+ /* KNI */
+ memset(&conf, 0, sizeof(conf));
+ snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
+ conf.force_bind = p_kni->force_bind;
+ if (conf.force_bind) {
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ p_kni->socket_id,
+ p_kni->core_id,
+ p_kni->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("%s invalid CPU core\n", p_kni->name);
+
+ conf.core_id = (uint32_t) lcore_id;
+ }
+ conf.group_id = p_link->pmd_id;
+ conf.mbuf_size = mempool_params->buffer_size;
+ conf.addr = dev_info.pci_dev->addr;
+ conf.id = dev_info.pci_dev->id;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.port_id = (uint8_t) p_link->pmd_id;
+ ops.change_mtu = kni_change_mtu;
+ ops.config_network_if = kni_config_network_interface;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
+ app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
+ if (!app->kni[i])
+ rte_panic("%s init error\n", p_kni->name);
+ }
+}
+#endif /* RTE_LIBRTE_KNI */
+
static void
app_init_msgq(struct app_params *app)
{
@@ -1128,15 +1301,16 @@ app_init_msgq(struct app_params *app)
}
}
-static void app_pipeline_params_get(struct app_params *app,
+void app_pipeline_params_get(struct app_params *app,
struct app_pipeline_params *p_in,
struct pipeline_params *p_out)
{
uint32_t i;
- uint32_t mempool_id;
snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
+ snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
+
p_out->socket_id = (int) p_in->socket_id;
p_out->log_level = app->log_level;
@@ -1212,34 +1386,35 @@ static void app_pipeline_params_get(struct app_params *app,
break;
}
case APP_PKTQ_IN_TM:
+ {
out->type = PIPELINE_PORT_IN_SCHED_READER;
out->params.sched.sched = app->tm[in->id];
out->burst_size = app->tm_params[in->id].burst_read;
break;
+ }
+#ifdef RTE_LIBRTE_KNI
+ case APP_PKTQ_IN_KNI:
+ {
+ out->type = PIPELINE_PORT_IN_KNI_READER;
+ out->params.kni.kni = app->kni[in->id];
+ out->burst_size = app->kni_params[in->id].burst_read;
+ break;
+ }
+#endif /* RTE_LIBRTE_KNI */
case APP_PKTQ_IN_SOURCE:
- mempool_id = app->source_params[in->id].mempool_id;
+ {
+ uint32_t mempool_id =
+ app->source_params[in->id].mempool_id;
+
out->type = PIPELINE_PORT_IN_SOURCE;
out->params.source.mempool = app->mempool[mempool_id];
out->burst_size = app->source_params[in->id].burst;
-
-#ifdef RTE_NEXT_ABI
- if (app->source_params[in->id].file_name
- != NULL) {
- out->params.source.file_name = strdup(
- app->source_params[in->id].
- file_name);
- if (out->params.source.file_name == NULL) {
- out->params.source.
- n_bytes_per_pkt = 0;
- break;
- }
- out->params.source.n_bytes_per_pkt =
- app->source_params[in->id].
- n_bytes_per_pkt;
- }
-#endif
-
+ out->params.source.file_name =
+ app->source_params[in->id].file_name;
+ out->params.source.n_bytes_per_pkt =
+ app->source_params[in->id].n_bytes_per_pkt;
break;
+ }
default:
break;
}
@@ -1350,7 +1525,8 @@ static void app_pipeline_params_get(struct app_params *app,
}
break;
}
- case APP_PKTQ_OUT_TM: {
+ case APP_PKTQ_OUT_TM:
+ {
struct rte_port_sched_writer_params *params =
&out->params.sched;
@@ -1360,24 +1536,45 @@ static void app_pipeline_params_get(struct app_params *app,
app->tm_params[in->id].burst_write;
break;
}
- case APP_PKTQ_OUT_SINK:
- out->type = PIPELINE_PORT_OUT_SINK;
- if (app->sink_params[in->id].file_name != NULL) {
- out->params.sink.file_name = strdup(
- app->sink_params[in->id].
- file_name);
- if (out->params.sink.file_name == NULL) {
- out->params.sink.max_n_pkts = 0;
- break;
- }
- out->params.sink.max_n_pkts =
- app->sink_params[in->id].
- n_pkts_to_dump;
+#ifdef RTE_LIBRTE_KNI
+ case APP_PKTQ_OUT_KNI:
+ {
+ struct app_pktq_kni_params *p_kni =
+ &app->kni_params[in->id];
+
+ if (p_kni->dropless == 0) {
+ struct rte_port_kni_writer_params *params =
+ &out->params.kni;
+
+ out->type = PIPELINE_PORT_OUT_KNI_WRITER;
+ params->kni = app->kni[in->id];
+ params->tx_burst_sz =
+ app->kni_params[in->id].burst_write;
} else {
- out->params.sink.file_name = NULL;
- out->params.sink.max_n_pkts = 0;
+ struct rte_port_kni_writer_nodrop_params
+ *params = &out->params.kni_nodrop;
+
+ out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
+ params->kni = app->kni[in->id];
+ params->tx_burst_sz =
+ app->kni_params[in->id].burst_write;
+ params->n_retries =
+ app->kni_params[in->id].n_retries;
}
break;
+ }
+#endif /* RTE_LIBRTE_KNI */
+ case APP_PKTQ_OUT_SINK:
+ {
+ out->type = PIPELINE_PORT_OUT_SINK;
+ out->params.sink.file_name =
+ app->sink_params[in->id].file_name;
+ out->params.sink.max_n_pkts =
+ app->sink_params[in->id].
+ n_pkts_to_dump;
+
+ break;
+ }
default:
break;
}
@@ -1449,6 +1646,27 @@ app_init_pipelines(struct app_params *app)
}
static void
+app_post_init_pipelines(struct app_params *app)
+{
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ int status;
+
+ if (data->ptype->fe_ops->f_post_init == NULL)
+ continue;
+
+ status = data->ptype->fe_ops->f_post_init(data->fe);
+ if (status)
+ rte_panic("Pipeline instance \"%s\" front-end "
+ "post-init error\n", params->name);
+ }
+}
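app_post_init_pipelines() dispatches the new optional f_post_init front-end hook after initialization, skipping pipeline types that leave the pointer NULL. A minimal standalone sketch of that optional-callback dispatch, with stand-in types:

#include <stdio.h>

/* Stand-in for the front-end ops of a pipeline type. */
struct fe_ops {
	int (*f_post_init)(void *fe);
};

static int
demo_post_init(void *fe)
{
	(void)fe;
	printf("post-init ran\n");
	return 0;
}

int
main(void)
{
	struct fe_ops with_hook = { demo_post_init };
	struct fe_ops without_hook = { NULL };
	struct fe_ops *pipelines[] = { &with_hook, &without_hook };
	unsigned i;

	for (i = 0; i < 2; i++) {
		if (pipelines[i]->f_post_init == NULL)
			continue;	/* hook is optional */

		if (pipelines[i]->f_post_init(NULL) != 0)
			fprintf(stderr, "post-init error\n");
	}
	return 0;
}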
+
+static void
app_init_threads(struct app_params *app)
{
uint64_t time = rte_get_tsc_cycles();
@@ -1534,6 +1752,7 @@ int app_init(struct app_params *app)
app_init_link(app);
app_init_swq(app);
app_init_tm(app);
+ app_init_kni(app);
app_init_msgq(app);
app_pipeline_common_cmd_push(app);
@@ -1551,6 +1770,13 @@ int app_init(struct app_params *app)
return 0;
}
+int app_post_init(struct app_params *app)
+{
+ app_post_init_pipelines(app);
+
+ return 0;
+}
+
static int
app_pipeline_type_cmd_push(struct app_params *app,
struct pipeline_type *ptype)
diff --git a/examples/ip_pipeline/parser.c b/examples/ip_pipeline/parser.c
new file mode 100644
index 00000000..689e2065
--- /dev/null
+++ b/examples/ip_pipeline/parser.c
@@ -0,0 +1,745 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For my_ether_aton() function:
+ *
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For inet_pton4() and inet_pton6() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
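+/*
+ * Illustrative usage sketch (not part of the upstream file): the accepted
+ * boolean spellings are "yes"/"on" (returning 1) and "no"/"off" (returning 0),
+ * in all-lower or all-upper case, optionally surrounded by whitespace;
+ * anything else yields -EINVAL.
+ *
+ *   parser_read_arg_bool("yes")       returns 1
+ *   parser_read_arg_bool(" OFF ")     returns 0
+ *   parser_read_arg_bool("maybe")     returns -EINVAL
+ */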
+
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
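+/*
+ * Illustrative usage sketch (not part of the upstream file): an optional
+ * K/M/G/T suffix scales the value by powers of 1024 (the switch cases fall
+ * through), and surrounding whitespace is tolerated.
+ *
+ *   uint64_t v;
+ *   parser_read_uint64(&v, "512")     v == 512
+ *   parser_read_uint64(&v, "16K")     v == 16 * 1024
+ *   parser_read_uint64(&v, "2M")      v == 2 * 1024 * 1024
+ *   parser_read_uint64(&v, "0x10")    returns -EINVAL (use parser_read_uint64_hex)
+ */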
+
+int
+parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoul(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if ((string == NULL) ||
+ (tokens == NULL) ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if ((i == *n_tokens) &&
+ (NULL != strtok_r(string, PARSE_DELIMITER, &string)))
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
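+/*
+ * Illustrative usage sketch (not part of the upstream file): the string is
+ * split in place on PARSE_DELIMITER characters, so it must be writable;
+ * *n_tokens is the array capacity on input and the token count on output.
+ *
+ *   char cmd[] = "firewall add bulk ./rules.txt";
+ *   char *tokens[8];
+ *   uint32_t n_tokens = RTE_DIM(tokens);
+ *   parse_tokenize_string(cmd, tokens, &n_tokens);
+ *   n_tokens == 4, tokens[0] == "firewall", ..., tokens[3] == "./rules.txt"
+ */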
+
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
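+/*
+ * Illustrative usage sketch (not part of the upstream file): the input length
+ * must be a multiple of 4 hex digits and the result must fit into *size bytes;
+ * *size is updated to the number of bytes actually written.
+ *
+ *   char hex[] = "0123ABCD";
+ *   uint8_t key[8];
+ *   uint32_t size = sizeof(key);
+ *   parse_hex_string(hex, key, &size);
+ *   size == 4, key[0..3] == {0x01, 0x23, 0xAB, 0xCD}
+ */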
+
+int
+parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+{
+ uint32_t n_max_labels = *n_labels, count = 0;
+
+ /* Check for void list of labels */
+ if (strcmp(string, "<void>") == 0) {
+ *n_labels = 0;
+ return 0;
+ }
+
+ /* At least one label should be present */
+ for ( ; (*string != '\0'); ) {
+ char *next;
+ int value;
+
+ if (count >= n_max_labels)
+ return -1;
+
+ if (count > 0) {
+ if (string[0] != ':')
+ return -1;
+
+ string++;
+ }
+
+ value = strtol(string, &next, 10);
+ if (next == string)
+ return -1;
+ string = next;
+
+ labels[count++] = (uint32_t) value;
+ }
+
+ *n_labels = count;
+ return 0;
+}
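+/*
+ * Illustrative usage sketch (not part of the upstream file): labels are given
+ * as a ':'-separated list, or "<void>" for an empty label stack.
+ *
+ *   char s[] = "16:100:200";
+ *   uint32_t labels[4], n_labels = RTE_DIM(labels);
+ *   parse_mpls_labels(s, labels, &n_labels);
+ *   n_labels == 3, labels == {16, 100, 200}
+ */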
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+ static struct ether_addr ether_addr;
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return NULL;
+ a = end + 1;
+ } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0);
+
+ /* Junk at the end of the line */
+ if (end[0] != 0)
+ return NULL;
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8);
+ ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return NULL;
+
+ return (struct ether_addr *)&ether_addr;
+}
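+/*
+ * Illustrative note (not part of the upstream file): both colon-separated MAC
+ * formats are accepted, e.g. "01:23:45:67:89:AB" (six 8-bit groups) and
+ * "0123:4567:89AB" (three 16-bit groups). The result points to a static
+ * buffer, so it is overwritten by the next call and is not thread-safe.
+ */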
+
+int
+parse_ipv4_addr(const char *token, struct in_addr *ipv4)
+{
+ if (strlen(token) >= INET_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton4(token, (unsigned char *)ipv4) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+parse_ipv6_addr(const char *token, struct in6_addr *ipv6)
+{
+ if (strlen(token) >= INET6_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton6(token, (unsigned char *)ipv6) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+parse_mac_addr(const char *token, struct ether_addr *addr)
+{
+ struct ether_addr *tmp;
+
+ tmp = my_ether_aton(token);
+ if (tmp == NULL)
+ return -1;
+
+ memcpy(addr, tmp, sizeof(struct ether_addr));
+ return 0;
+}
+
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+ /* If everything has been parsed, nothing should be left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+ /* If it starts with a digit, it must be the core ID only. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+ if (num_len == RTE_DIM(num))
+ return -EINVAL;
+
+ if (!isdigit(*next))
+ break;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ *socket = s;
+ *core = c;
+ *ht = h;
+ return 0;
+}
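+/*
+ * Illustrative usage sketch (not part of the upstream file): a pipeline core
+ * placement string is either a plain core ID or an [sX][cY][h] combination.
+ *
+ *   uint32_t s, c, ht;
+ *   parse_pipeline_core(&s, &c, &ht, "2")        s == 0, c == 2, ht == 0
+ *   parse_pipeline_core(&s, &c, &ht, "s1c2")     s == 1, c == 2, ht == 0
+ *   parse_pipeline_core(&s, &c, &ht, "s0c1h")    s == 0, c == 1, ht == 1
+ */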
diff --git a/examples/ip_pipeline/parser.h b/examples/ip_pipeline/parser.h
index 58b59daf..9bd36af3 100644
--- a/examples/ip_pipeline/parser.h
+++ b/examples/ip_pipeline/parser.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,17 +34,51 @@
#ifndef __INCLUDE_PARSER_H__
#define __INCLUDE_PARSER_H__
-int
-parser_read_arg_bool(const char *p);
+#include <stdint.h>
-int
-parser_read_uint64(uint64_t *value, const char *p);
+#include <rte_ip.h>
+#include <rte_ether.h>
-int
-parser_read_uint32(uint32_t *value, const char *p);
+#define PARSE_DELIMITER " \f\n\r\t\v"
-int
-parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
-#endif
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int parser_read_arg_bool(const char *p);
+
+int parser_read_uint64(uint64_t *value, const char *p);
+int parser_read_uint32(uint32_t *value, const char *p);
+int parser_read_uint16(uint16_t *value, const char *p);
+int parser_read_uint8(uint8_t *value, const char *p);
+int parser_read_uint64_hex(uint64_t *value, const char *p);
+int parser_read_uint32_hex(uint32_t *value, const char *p);
+int parser_read_uint16_hex(uint16_t *value, const char *p);
+int parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int parse_ipv4_addr(const char *token, struct in_addr *ipv4);
+int parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
+int parse_mac_addr(const char *token, struct ether_addr *addr);
+int parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels);
+
+int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
+
+#endif
diff --git a/examples/ip_pipeline/pipeline.h b/examples/ip_pipeline/pipeline.h
index dab9c36d..14a551db 100644
--- a/examples/ip_pipeline/pipeline.h
+++ b/examples/ip_pipeline/pipeline.h
@@ -42,13 +42,22 @@
* Pipeline type front-end operations
*/
-typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params, void *arg);
+typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params,
+ void *arg);
+
+typedef int (*pipeline_fe_op_post_init)(void *pipeline);
typedef int (*pipeline_fe_op_free)(void *pipeline);
+typedef int (*pipeline_fe_op_track)(struct pipeline_params *params,
+ uint32_t port_in,
+ uint32_t *port_out);
+
struct pipeline_fe_ops {
pipeline_fe_op_init f_init;
+ pipeline_fe_op_post_init f_post_init;
pipeline_fe_op_free f_free;
+ pipeline_fe_op_track f_track;
cmdline_parse_ctx_t *cmds;
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.c b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
index a691d422..cd1d082a 100644
--- a/examples/ip_pipeline/pipeline/pipeline_common_fe.c
+++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,146 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
-#include <cmdline_socket.h>
#include <cmdline.h>
#include "pipeline_common_fe.h"
+#include "parser.h"
+
+struct app_link_params *
+app_pipeline_track_pktq_out_to_link(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t pktq_out_id)
+{
+ struct app_pipeline_params *p;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return NULL;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return NULL;
+
+ for ( ; ; ) {
+ struct app_pktq_out_params *pktq_out =
+ &p->pktq_out[pktq_out_id];
+
+ switch (pktq_out->type) {
+ case APP_PKTQ_OUT_HWQ:
+ {
+ struct app_pktq_hwq_out_params *hwq_out;
+
+ hwq_out = &app->hwq_out_params[pktq_out->id];
+
+ return app_get_link_for_txq(app, hwq_out);
+ }
+
+ case APP_PKTQ_OUT_SWQ:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_swq_params *swq;
+ uint32_t pktq_in_id;
+ int status;
+
+ swq = &app->swq_params[pktq_out->id];
+ p = app_swq_get_reader(app, swq, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_TM:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_tm_params *tm;
+ uint32_t pktq_in_id;
+ int status;
+
+ tm = &app->tm_params[pktq_out->id];
+ p = app_tm_get_reader(app, tm, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_KNI:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_kni_params *kni;
+ uint32_t pktq_in_id;
+ int status;
+
+ kni = &app->kni_params[pktq_out->id];
+ p = app_kni_get_reader(app, kni, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_SINK:
+ default:
+ return NULL;
+ }
+ }
+}
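+/*
+ * Descriptive note (not part of the upstream file): the loop above follows a
+ * pipeline output queue through intermediate SWQ/TM/KNI stages, asking each
+ * downstream pipeline's f_track handler which of its output ports the traffic
+ * leaves on, until it reaches a HWQ (NIC TX queue) whose link can be returned;
+ * sinks and pipelines without a tracking handler yield NULL.
+ */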
+
+int
+app_pipeline_track_default(struct pipeline_params *p,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_out == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
int
app_pipeline_ping(struct app_params *app,
@@ -312,6 +446,40 @@ app_pipeline_port_in_disable(struct app_params *app,
}
int
+app_link_set_op(struct app_params *app,
+ uint32_t link_id,
+ uint32_t pipeline_id,
+ app_link_op op,
+ void *arg)
+{
+ struct app_pipeline_params *pp;
+ struct app_link_params *lp;
+ struct app_link_data *ld;
+ uint32_t ppos, lpos;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (op == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
+ if (lp == NULL)
+ return -1;
+ lpos = lp - app->link_params;
+ ld = &app->link_data[lpos];
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, pp);
+ if (pp == NULL)
+ return -1;
+ ppos = pp - app->pipeline_params;
+
+ ld->f_link[ppos] = op;
+ ld->arg[ppos] = arg;
+
+ return 0;
+}
+
+int
app_link_config(struct app_params *app,
uint32_t link_id,
uint32_t ip,
@@ -382,6 +550,8 @@ app_link_up(struct app_params *app,
uint32_t link_id)
{
struct app_link_params *p;
+ struct app_link_data *d;
+ int i;
/* Check input arguments */
if (app == NULL)
@@ -394,6 +564,8 @@ app_link_up(struct app_params *app,
return -1;
}
+ d = &app->link_data[p - app->link_params];
+
/* Check link state */
if (p->state) {
APP_LOG(app, HIGH, "%s is already UP", p->name);
@@ -408,6 +580,11 @@ app_link_up(struct app_params *app,
app_link_up_internal(app, p);
+ /* Callbacks */
+ for (i = 0; i < APP_MAX_PIPELINES; i++)
+ if (d->f_link[i])
+ d->f_link[i](app, link_id, 1, d->arg[i]);
+
return 0;
}
@@ -416,6 +593,8 @@ app_link_down(struct app_params *app,
uint32_t link_id)
{
struct app_link_params *p;
+ struct app_link_data *d;
+ uint32_t i;
/* Check input arguments */
if (app == NULL)
@@ -428,6 +607,8 @@ app_link_down(struct app_params *app,
return -1;
}
+ d = &app->link_data[p - app->link_params];
+
/* Check link state */
if (p->state == 0) {
APP_LOG(app, HIGH, "%s is already DOWN", p->name);
@@ -436,6 +617,11 @@ app_link_down(struct app_params *app,
app_link_down_internal(app, p);
+ /* Callbacks */
+ for (i = 0; i < APP_MAX_PIPELINES; i++)
+ if (d->f_link[i])
+ d->f_link[i](app, link_id, 0, d->arg[i]);
+
return 0;
}
@@ -464,16 +650,16 @@ cmd_ping_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_ping_p_string =
+static cmdline_parse_token_string_t cmd_ping_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_ping_result, p_string, "p");
-cmdline_parse_token_num_t cmd_ping_pipeline_id =
+static cmdline_parse_token_num_t cmd_ping_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_ping_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_ping_ping_string =
+static cmdline_parse_token_string_t cmd_ping_ping_string =
TOKEN_STRING_INITIALIZER(struct cmd_ping_result, ping_string, "ping");
-cmdline_parse_inst_t cmd_ping = {
+static cmdline_parse_inst_t cmd_ping = {
.f = cmd_ping_parsed,
.data = NULL,
.help_str = "Pipeline ping",
@@ -498,6 +684,7 @@ struct cmd_stats_port_in_result {
uint32_t port_in_id;
};
+
static void
cmd_stats_port_in_parsed(
void *parsed_result,
@@ -531,23 +718,23 @@ cmd_stats_port_in_parsed(
stats.stats.n_pkts_drop);
}
-cmdline_parse_token_string_t cmd_stats_port_in_p_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
+static cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, stats_string,
"stats");
-cmdline_parse_token_string_t cmd_stats_port_in_port_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_stats_port_in_in_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_in_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, in_string,
"in");
@@ -555,7 +742,7 @@ cmdline_parse_token_string_t cmd_stats_port_in_in_string =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, port_in_id,
UINT32);
-cmdline_parse_inst_t cmd_stats_port_in = {
+static cmdline_parse_inst_t cmd_stats_port_in = {
.f = cmd_stats_port_in_parsed,
.data = NULL,
.help_str = "Pipeline input port stats",
@@ -617,31 +804,31 @@ cmd_stats_port_out_parsed(
stats.stats.n_pkts_drop);
}
-cmdline_parse_token_string_t cmd_stats_port_out_p_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
+static cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, stats_string,
"stats");
-cmdline_parse_token_string_t cmd_stats_port_out_port_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_stats_port_out_out_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_out_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, out_string,
"out");
-cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
+static cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, port_out_id,
UINT32);
-cmdline_parse_inst_t cmd_stats_port_out = {
+static cmdline_parse_inst_t cmd_stats_port_out = {
.f = cmd_stats_port_out_parsed,
.data = NULL,
.help_str = "Pipeline output port stats",
@@ -707,26 +894,26 @@ cmd_stats_table_parsed(
stats.n_pkts_dropped_lkp_miss);
}
-cmdline_parse_token_string_t cmd_stats_table_p_string =
+static cmdline_parse_token_string_t cmd_stats_table_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
+static cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_stats_table_stats_string =
+static cmdline_parse_token_string_t cmd_stats_table_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, stats_string,
"stats");
-cmdline_parse_token_string_t cmd_stats_table_table_string =
+static cmdline_parse_token_string_t cmd_stats_table_table_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, table_string,
"table");
-cmdline_parse_token_num_t cmd_stats_table_table_id =
+static cmdline_parse_token_num_t cmd_stats_table_table_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, table_id, UINT32);
-cmdline_parse_inst_t cmd_stats_table = {
+static cmdline_parse_inst_t cmd_stats_table = {
.f = cmd_stats_table_parsed,
.data = NULL,
.help_str = "Pipeline table stats",
@@ -771,31 +958,31 @@ cmd_port_in_enable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_port_in_enable_p_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
+static cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_enable_port_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_port_in_enable_in_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_in_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, in_string,
"in");
-cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
+static cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, port_in_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result,
enable_string, "enable");
-cmdline_parse_inst_t cmd_port_in_enable = {
+static cmdline_parse_inst_t cmd_port_in_enable = {
.f = cmd_port_in_enable_parsed,
.data = NULL,
.help_str = "Pipeline input port enable",
@@ -841,31 +1028,31 @@ cmd_port_in_disable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_port_in_disable_p_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
+static cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_disable_port_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_port_in_disable_in_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_in_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, in_string,
"in");
-cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
+static cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, port_in_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result,
disable_string, "disable");
-cmdline_parse_inst_t cmd_port_in_disable = {
+static cmdline_parse_inst_t cmd_port_in_disable = {
.f = cmd_port_in_disable_parsed,
.data = NULL,
.help_str = "Pipeline input port disable",
@@ -963,219 +1150,144 @@ print_link_info(struct app_link_params *p)
printf("\n");
}
-struct cmd_link_config_result {
- cmdline_fixed_string_t link_string;
- uint32_t link_id;
- cmdline_fixed_string_t config_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
-};
-
-static void
-cmd_link_config_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_link_config_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- uint32_t link_id = params->link_id;
- uint32_t ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- uint32_t depth = params->depth;
-
- status = app_link_config(app, link_id, ip, depth);
- if (status)
- printf("Command failed\n");
- else {
- struct app_link_params *p;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- print_link_info(p);
- }
-}
-
-cmdline_parse_token_string_t cmd_link_config_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, link_string,
- "link");
-
-cmdline_parse_token_num_t cmd_link_config_link_id =
- TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, link_id, UINT32);
-
-cmdline_parse_token_string_t cmd_link_config_config_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, config_string,
- "config");
-
-cmdline_parse_token_ipaddr_t cmd_link_config_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_link_config_result, ip);
-
-cmdline_parse_token_num_t cmd_link_config_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, depth, UINT32);
-
-cmdline_parse_inst_t cmd_link_config = {
- .f = cmd_link_config_parsed,
- .data = NULL,
- .help_str = "Link configuration",
- .tokens = {
- (void *)&cmd_link_config_link_string,
- (void *)&cmd_link_config_link_id,
- (void *)&cmd_link_config_config_string,
- (void *)&cmd_link_config_ip,
- (void *)&cmd_link_config_depth,
- NULL,
- },
-};
-
/*
- * link up
+ * link
+ *
+ * link config:
+ * link <linkid> config <ipaddr> <depth>
+ *
+ * link up:
+ * link <linkid> up
+ *
+ * link down:
+ * link <linkid> down
+ *
+ * link ls:
+ * link ls
*/
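+/*
+ * Example invocations (illustrative only):
+ *   link 0 config 10.0.0.1 24
+ *   link 0 up
+ *   link 0 down
+ *   link ls
+ */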
-struct cmd_link_up_result {
+struct cmd_link_result {
cmdline_fixed_string_t link_string;
- uint32_t link_id;
- cmdline_fixed_string_t up_string;
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_link_up_parsed(
+cmd_link_parsed(
void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
- void *data)
+ void *data)
{
- struct cmd_link_up_result *params = parsed_result;
+ struct cmd_link_result *params = parsed_result;
struct app_params *app = data;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
int status;
- status = app_link_up(app, params->link_id);
- if (status != 0)
- printf("Command failed\n");
- else {
- struct app_link_params *p;
+ uint32_t link_id;
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
- p);
- print_link_info(p);
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "link");
+ return;
}
-}
-
-cmdline_parse_token_string_t cmd_link_up_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, link_string,
- "link");
-cmdline_parse_token_num_t cmd_link_up_link_id =
- TOKEN_NUM_INITIALIZER(struct cmd_link_up_result, link_id, UINT32);
+ /* link ls */
+ if ((n_tokens == 1) && (strcmp(tokens[0], "ls") == 0)) {
+ for (link_id = 0; link_id < app->n_links; link_id++) {
+ struct app_link_params *p;
-cmdline_parse_token_string_t cmd_link_up_up_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, up_string, "up");
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ print_link_info(p);
+ }
+ return;
+ } /* link ls */
-cmdline_parse_inst_t cmd_link_up = {
- .f = cmd_link_up_parsed,
- .data = NULL,
- .help_str = "Link UP",
- .tokens = {
- (void *)&cmd_link_up_link_string,
- (void *)&cmd_link_up_link_id,
- (void *)&cmd_link_up_up_string,
- NULL,
- },
-};
+ if (n_tokens < 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link");
+ return;
+ }
-/*
- * link down
- */
+ if (parser_read_uint32(&link_id, tokens[0])) {
+ printf(CMD_MSG_INVALID_ARG, "linkid");
+ return;
+ }
-struct cmd_link_down_result {
- cmdline_fixed_string_t link_string;
- uint32_t link_id;
- cmdline_fixed_string_t down_string;
-};
+ /* link config */
+ if (strcmp(tokens[1], "config") == 0) {
+ struct in_addr ipaddr_ipv4;
+ uint32_t depth;
-static void
-cmd_link_down_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_link_down_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link config");
+ return;
+ }
- status = app_link_down(app, params->link_id);
- if (status != 0)
- printf("Command failed\n");
- else {
- struct app_link_params *p;
+ if (parse_ipv4_addr(tokens[2], &ipaddr_ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
- p);
- print_link_info(p);
- }
-}
+ if (parser_read_uint32(&depth, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
-cmdline_parse_token_string_t cmd_link_down_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, link_string,
- "link");
+ status = app_link_config(app,
+ link_id,
+ rte_be_to_cpu_32(ipaddr_ipv4.s_addr),
+ depth);
+ if (status)
+ printf(CMD_MSG_FAIL, "link config");
-cmdline_parse_token_num_t cmd_link_down_link_id =
- TOKEN_NUM_INITIALIZER(struct cmd_link_down_result, link_id, UINT32);
+ return;
+ } /* link config */
-cmdline_parse_token_string_t cmd_link_down_down_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, down_string,
- "down");
+ /* link up */
+ if (strcmp(tokens[1], "up") == 0) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link up");
+ return;
+ }
-cmdline_parse_inst_t cmd_link_down = {
- .f = cmd_link_down_parsed,
- .data = NULL,
- .help_str = "Link DOWN",
- .tokens = {
- (void *) &cmd_link_down_link_string,
- (void *) &cmd_link_down_link_id,
- (void *) &cmd_link_down_down_string,
- NULL,
- },
-};
+ status = app_link_up(app, link_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "link up");
-/*
- * link ls
- */
+ return;
+ } /* link up */
-struct cmd_link_ls_result {
- cmdline_fixed_string_t link_string;
- cmdline_fixed_string_t ls_string;
-};
+ /* link down */
+ if (strcmp(tokens[1], "down") == 0) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link down");
+ return;
+ }
-static void
-cmd_link_ls_parsed(
- __attribute__((unused)) void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct app_params *app = data;
- uint32_t link_id;
+ status = app_link_down(app, link_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "link down");
- for (link_id = 0; link_id < app->n_links; link_id++) {
- struct app_link_params *p;
+ return;
+ } /* link down */
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- print_link_info(p);
- }
+ printf(CMD_MSG_MISMATCH_ARGS, "link");
}
-cmdline_parse_token_string_t cmd_link_ls_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, link_string,
- "link");
+static cmdline_parse_token_string_t cmd_link_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_result, link_string, "link");
-cmdline_parse_token_string_t cmd_link_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, ls_string, "ls");
+static cmdline_parse_token_string_t cmd_link_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_link_ls = {
- .f = cmd_link_ls_parsed,
+static cmdline_parse_inst_t cmd_link = {
+ .f = cmd_link_parsed,
.data = NULL,
- .help_str = "Link list",
+ .help_str = "link config / up / down / ls",
.tokens = {
- (void *)&cmd_link_ls_link_string,
- (void *)&cmd_link_ls_ls_string,
+ (void *) &cmd_link_link_string,
+ (void *) &cmd_link_multi_string,
NULL,
},
};
@@ -1212,6 +1324,11 @@ static cmdline_parse_inst_t cmd_quit = {
/*
* run
+ *
+ * run <file>
+ * run <file> [<count> [<interval>]]
+ * <count> defaults to 1
+ * <interval> is measured in milliseconds and defaults to 1 second
*/
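+/*
+ * Example invocations (illustrative only; the script path is a placeholder):
+ *   run ./config/l3fwd.sh
+ *   run ./config/l3fwd.sh 10 500    (replay the script 10 times, 500 ms apart)
+ */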
static void
@@ -1233,9 +1350,9 @@ app_run_file(
close(fd);
}
-struct cmd_run_file_result {
+struct cmd_run_result {
cmdline_fixed_string_t run_string;
- char file_name[APP_FILE_NAME_SIZE];
+ cmdline_multi_string_t multi_string;
};
static void
@@ -1244,25 +1361,87 @@ cmd_run_parsed(
struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_run_file_result *params = parsed_result;
+ struct cmd_run_result *params = parsed_result;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ char *file_name;
+ uint32_t count, interval, i;
- app_run_file(cl->ctx, params->file_name);
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "run");
+ return;
+ }
+
+ switch (n_tokens) {
+ case 0:
+ printf(CMD_MSG_NOT_ENOUGH_ARGS, "run");
+ return;
+
+ case 1:
+ file_name = tokens[0];
+ count = 1;
+ interval = 1000;
+ break;
+
+ case 2:
+ file_name = tokens[0];
+
+ if (parser_read_uint32(&count, tokens[1]) ||
+ (count == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "count");
+ return;
+ }
+
+ interval = 1000;
+ break;
+
+ case 3:
+ file_name = tokens[0];
+
+ if (parser_read_uint32(&count, tokens[1]) ||
+ (count == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "count");
+ return;
+ }
+
+ if (parser_read_uint32(&interval, tokens[2]) ||
+ (interval == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "interval");
+ return;
+ }
+ break;
+
+ default:
+ printf(CMD_MSG_MISMATCH_ARGS, "run");
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ app_run_file(cl->ctx, file_name);
+ if (interval)
+ usleep(interval * 1000);
+ }
}
-cmdline_parse_token_string_t cmd_run_run_string =
- TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, run_string,
- "run");
+static cmdline_parse_token_string_t cmd_run_run_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_result, run_string, "run");
+
+static cmdline_parse_token_string_t cmd_run_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_token_string_t cmd_run_file_name =
- TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, file_name, NULL);
-cmdline_parse_inst_t cmd_run = {
+static cmdline_parse_inst_t cmd_run = {
.f = cmd_run_parsed,
.data = NULL,
.help_str = "Run CLI script file",
.tokens = {
(void *) &cmd_run_run_string,
- (void *) &cmd_run_file_name,
+ (void *) &cmd_run_multi_string,
NULL,
},
};
@@ -1270,12 +1449,7 @@ cmdline_parse_inst_t cmd_run = {
static cmdline_parse_ctx_t pipeline_common_cmds[] = {
(cmdline_parse_inst_t *) &cmd_quit,
(cmdline_parse_inst_t *) &cmd_run,
-
- (cmdline_parse_inst_t *) &cmd_link_config,
- (cmdline_parse_inst_t *) &cmd_link_up,
- (cmdline_parse_inst_t *) &cmd_link_down,
- (cmdline_parse_inst_t *) &cmd_link_ls,
-
+ (cmdline_parse_inst_t *) &cmd_link,
(cmdline_parse_inst_t *) &cmd_ping,
(cmdline_parse_inst_t *) &cmd_stats_port_in,
(cmdline_parse_inst_t *) &cmd_stats_port_out,
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.h b/examples/ip_pipeline/pipeline/pipeline_common_fe.h
index cfad963d..ce0bf13e 100644
--- a/examples/ip_pipeline/pipeline/pipeline_common_fe.h
+++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.h
@@ -182,6 +182,16 @@ app_msg_send_recv(struct app_params *app,
return msg_recv;
}
+struct app_link_params *
+app_pipeline_track_pktq_out_to_link(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t pktq_out_id);
+
+int
+app_pipeline_track_default(struct pipeline_params *params,
+ uint32_t port_in,
+ uint32_t *port_out);
+
int
app_pipeline_ping(struct app_params *app,
uint32_t pipeline_id);
@@ -215,6 +225,13 @@ app_pipeline_port_in_disable(struct app_params *app,
uint32_t port_id);
int
+app_link_set_op(struct app_params *app,
+ uint32_t link_id,
+ uint32_t pipeline_id,
+ app_link_op op,
+ void *arg);
+
+int
app_link_config(struct app_params *app,
uint32_t link_id,
uint32_t ip,
@@ -231,4 +248,13 @@ app_link_down(struct app_params *app,
int
app_pipeline_common_cmd_push(struct app_params *app);
+#define CMD_MSG_OUT_OF_MEMORY "Not enough memory\n"
+#define CMD_MSG_NOT_ENOUGH_ARGS "Not enough arguments for command \"%s\"\n"
+#define CMD_MSG_TOO_MANY_ARGS "Too many arguments for command \"%s\"\n"
+#define CMD_MSG_MISMATCH_ARGS "Incorrect set of arguments for command \"%s\"\n"
+#define CMD_MSG_INVALID_ARG "Invalid value for argument \"%s\"\n"
+#define CMD_MSG_ARG_NOT_FOUND "Syntax error: \"%s\" not found\n"
+#define CMD_MSG_FILE_ERR "Error in file \"%s\" at line %u\n"
+#define CMD_MSG_FAIL "Command \"%s\" failed\n"
+
#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.c b/examples/ip_pipeline/pipeline/pipeline_firewall.c
index fd897d5c..a82e552d 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall.c
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall.c
@@ -30,9 +30,11 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
+#include <errno.h>
#include <stdio.h>
#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
#include <sys/queue.h>
#include <netinet/in.h>
@@ -43,15 +45,11 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
-#include <cmdline_socket.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_firewall.h"
-
-#define BUF_SIZE 1024
+#include "parser.h"
struct app_pipeline_firewall_rule {
struct pipeline_firewall_key key;
@@ -75,18 +73,6 @@ struct app_pipeline_firewall {
void *default_rule_entry_ptr;
};
-struct app_pipeline_add_bulk_params {
- struct pipeline_firewall_key *keys;
- uint32_t n_keys;
- uint32_t *priorities;
- uint32_t *port_ids;
-};
-
-struct app_pipeline_del_bulk_params {
- struct pipeline_firewall_key *keys;
- uint32_t n_keys;
-};
-
static void
print_firewall_ipv4_rule(struct app_pipeline_firewall_rule *rule)
{
@@ -272,356 +258,118 @@ app_pipeline_firewall_key_check_and_normalize(struct pipeline_firewall_key *key)
}
}
-static int
-app_pipeline_add_bulk_parse_file(char *filename,
- struct app_pipeline_add_bulk_params *params)
+int
+app_pipeline_firewall_load_file(char *filename,
+ struct pipeline_firewall_key *keys,
+ uint32_t *priorities,
+ uint32_t *port_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
{
- FILE *f;
- char file_buf[BUF_SIZE];
- uint32_t i;
- int status = 0;
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
- f = fopen(filename, "r");
- if (f == NULL)
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (priorities == NULL) ||
+ (port_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
return -1;
-
- params->n_keys = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL)
- params->n_keys++;
- rewind(f);
-
- if (params->n_keys == 0) {
- status = -1;
- goto end;
- }
-
- params->keys = rte_malloc(NULL,
- params->n_keys * sizeof(struct pipeline_firewall_key),
- RTE_CACHE_LINE_SIZE);
- if (params->keys == NULL) {
- status = -1;
- goto end;
- }
-
- params->priorities = rte_malloc(NULL,
- params->n_keys * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (params->priorities == NULL) {
- status = -1;
- goto end;
- }
-
- params->port_ids = rte_malloc(NULL,
- params->n_keys * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (params->port_ids == NULL) {
- status = -1;
- goto end;
- }
-
- i = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL) {
- char *str;
-
- str = strtok(file_buf, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->priorities[i] = atoi(str);
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
}
- params->keys[i].key.ipv4_5tuple.src_ip_mask = atoi(str);
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip_mask = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.proto = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- /* Need to add 2 to str to skip leading 0x */
- params->keys[i].key.ipv4_5tuple.proto_mask = strtol(str+2, NULL, 16);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->port_ids[i] = atoi(str);
- params->keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
-
- i++;
- }
-
-end:
- fclose(f);
- return status;
-}
-
-static int
-app_pipeline_del_bulk_parse_file(char *filename,
- struct app_pipeline_del_bulk_params *params)
-{
- FILE *f;
- char file_buf[BUF_SIZE];
- uint32_t i;
- int status = 0;
+ /* Open input file */
f = fopen(filename, "r");
- if (f == NULL)
+ if (f == NULL) {
+ *line = 0;
return -1;
-
- params->n_keys = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL)
- params->n_keys++;
- rewind(f);
-
- if (params->n_keys == 0) {
- status = -1;
- goto end;
- }
-
- params->keys = rte_malloc(NULL,
- params->n_keys * sizeof(struct pipeline_firewall_key),
- RTE_CACHE_LINE_SIZE);
- if (params->keys == NULL) {
- status = -1;
- goto end;
}
- i = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL) {
- char *str;
-
- str = strtok(file_buf, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip_mask = atoi(str);
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip_mask = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.proto = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- /* Need to add 2 to str to skip leading 0x */
- params->keys[i].key.ipv4_5tuple.proto_mask = strtol(str+2, NULL, 16);
-
- params->keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ uint32_t priority = 0;
+ struct in_addr sipaddr;
+ uint32_t sipdepth = 0;
+ struct in_addr dipaddr;
+ uint32_t dipdepth = 0;
+ uint16_t sport0 = 0;
+ uint16_t sport1 = 0;
+ uint16_t dport0 = 0;
+ uint16_t dport1 = 0;
+ uint8_t proto = 0;
+ uint8_t protomask = 0;
+ uint32_t port_id = 0;
+
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 15) ||
+ strcmp(tokens[0], "priority") ||
+ parser_read_uint32(&priority, tokens[1]) ||
+ strcmp(tokens[2], "ipv4") ||
+ parse_ipv4_addr(tokens[3], &sipaddr) ||
+ parser_read_uint32(&sipdepth, tokens[4]) ||
+ parse_ipv4_addr(tokens[5], &dipaddr) ||
+ parser_read_uint32(&dipdepth, tokens[6]) ||
+ parser_read_uint16(&sport0, tokens[7]) ||
+ parser_read_uint16(&sport1, tokens[8]) ||
+ parser_read_uint16(&dport0, tokens[9]) ||
+ parser_read_uint16(&dport1, tokens[10]) ||
+ parser_read_uint8(&proto, tokens[11]) ||
+ parser_read_uint8_hex(&protomask, tokens[12]) ||
+ strcmp(tokens[13], "port") ||
+ parser_read_uint32(&port_id, tokens[14]))
+ goto error1;
+
+ keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ keys[i].key.ipv4_5tuple.src_ip =
+ rte_be_to_cpu_32(sipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.src_ip_mask = sipdepth;
+ keys[i].key.ipv4_5tuple.dst_ip =
+ rte_be_to_cpu_32(dipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ keys[i].key.ipv4_5tuple.src_port_from = sport0;
+ keys[i].key.ipv4_5tuple.src_port_to = sport1;
+ keys[i].key.ipv4_5tuple.dst_port_from = dport0;
+ keys[i].key.ipv4_5tuple.dst_port_to = dport1;
+ keys[i].key.ipv4_5tuple.proto = proto;
+ keys[i].key.ipv4_5tuple.proto_mask = protomask;
+
+ port_ids[i] = port_id;
+ priorities[i] = priority;
+
+ if (app_pipeline_firewall_key_check_and_normalize(&keys[i]))
+ goto error1;
i++;
}
- for (i = 0; i < params->n_keys; i++) {
- if (app_pipeline_firewall_key_check_and_normalize(&params->keys[i]) != 0) {
- status = -1;
- goto end;
- }
- }
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
-end:
+error1:
+ *line = l;
fclose(f);
- return status;
+ return -1;
}
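+/*
+ * Illustrative note (not part of the upstream file): each non-comment line of
+ * the bulk file must carry the 15 tokens checked above, for example:
+ *
+ *   priority 1 ipv4 0.0.0.0 0 10.0.0.0 8 0 65535 0 65535 6 0xF port 0
+ *
+ * i.e. priority, "ipv4", source address/depth, destination address/depth,
+ * source and destination port ranges, protocol and protocol mask (hex),
+ * then "port" and the output port ID. Lines starting with '#' are skipped.
+ */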
int
@@ -804,14 +552,14 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
return -1;
rules = rte_malloc(NULL,
- n_keys * sizeof(struct app_pipeline_firewall_rule *),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
if (rules == NULL)
return -1;
new_rules = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
if (new_rules == NULL) {
rte_free(rules);
return -1;
@@ -834,8 +582,9 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
new_rules[i] = (rules[i] == NULL);
if (rules[i] == NULL) {
- rules[i] = rte_malloc(NULL, sizeof(*rules[i]),
- RTE_CACHE_LINE_SIZE);
+ rules[i] = rte_malloc(NULL,
+ sizeof(*rules[i]),
+ RTE_CACHE_LINE_SIZE);
if (rules[i] == NULL) {
uint32_t j;
@@ -852,8 +601,8 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
}
keys_found = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
if (keys_found == NULL) {
uint32_t j;
@@ -867,8 +616,8 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
}
entries_ptr = rte_malloc(NULL,
- n_keys * sizeof(struct rte_pipeline_table_entry *),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(struct rte_pipeline_table_entry *),
+ RTE_CACHE_LINE_SIZE);
if (entries_ptr == NULL) {
uint32_t j;
@@ -883,8 +632,8 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
}
for (i = 0; i < n_keys; i++) {
entries_ptr[i] = rte_malloc(NULL,
- sizeof(struct rte_pipeline_table_entry),
- RTE_CACHE_LINE_SIZE);
+ sizeof(struct rte_pipeline_table_entry),
+ RTE_CACHE_LINE_SIZE);
if (entries_ptr[i] == NULL) {
uint32_t j;
@@ -1030,8 +779,8 @@ app_pipeline_firewall_delete_bulk(struct app_params *app,
return -1;
rules = rte_malloc(NULL,
- n_keys * sizeof(struct app_pipeline_firewall_rule *),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
if (rules == NULL)
return -1;
@@ -1044,8 +793,8 @@ app_pipeline_firewall_delete_bulk(struct app_params *app,
}
keys_found = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
if (keys_found == NULL) {
rte_free(rules);
return -1;
@@ -1197,668 +946,500 @@ app_pipeline_firewall_delete_default_rule(struct app_params *app,
}
/*
- * p firewall add ipv4
+ * firewall
+ *
+ * firewall add:
+ * p <pipelineid> firewall add priority <priority>
+ * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
+ * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
+ * port <portid>
+ * Note: <protomask> is a hex value
+ *
+ * p <pipelineid> firewall add bulk <file>
+ *
+ * firewall add default:
+ * p <pipelineid> firewall add default <port ID>
+ *
+ * firewall del:
+ * p <pipelineid> firewall del
+ * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
+ * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
+ *
+ * p <pipelineid> firewall del bulk <file>
+ *
+ * firewall del default:
+ * p <pipelineid> firewall del default
+ *
+ * firewall ls:
+ * p <pipelineid> firewall ls
*/
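+/*
+ * Example invocations (illustrative only; the rule file path is a placeholder):
+ *   p 1 firewall add priority 1 ipv4 10.0.0.0 8 192.168.0.0 16 0 65535 0 65535 6 0xF port 0
+ *   p 1 firewall add bulk ./config/firewall.txt
+ *   p 1 firewall add default 0
+ *   p 1 firewall del default
+ *   p 1 firewall ls
+ */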
-struct cmd_firewall_add_ipv4_result {
+struct cmd_firewall_result {
cmdline_fixed_string_t p_string;
uint32_t pipeline_id;
cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv4_string;
- int32_t priority;
- cmdline_ipaddr_t src_ip;
- uint32_t src_ip_mask;
- cmdline_ipaddr_t dst_ip;
- uint32_t dst_ip_mask;
- uint16_t src_port_from;
- uint16_t src_port_to;
- uint16_t dst_port_from;
- uint16_t dst_port_to;
- uint8_t proto;
- uint8_t proto_mask;
- uint8_t port_id;
+ cmdline_multi_string_t multi_string;
};
-static void
-cmd_firewall_add_ipv4_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
+static void cmd_firewall_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
void *data)
{
- struct cmd_firewall_add_ipv4_result *params = parsed_result;
+ struct cmd_firewall_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_firewall_key key;
int status;
- key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
- key.key.ipv4_5tuple.src_ip = rte_bswap32(
- (uint32_t) params->src_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.src_ip_mask = params->src_ip_mask;
- key.key.ipv4_5tuple.dst_ip = rte_bswap32(
- (uint32_t) params->dst_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.dst_ip_mask = params->dst_ip_mask;
- key.key.ipv4_5tuple.src_port_from = params->src_port_from;
- key.key.ipv4_5tuple.src_port_to = params->src_port_to;
- key.key.ipv4_5tuple.dst_port_from = params->dst_port_from;
- key.key.ipv4_5tuple.dst_port_to = params->dst_port_to;
- key.key.ipv4_5tuple.proto = params->proto;
- key.key.ipv4_5tuple.proto_mask = params->proto_mask;
-
- status = app_pipeline_firewall_add_rule(app,
- params->pipeline_id,
- &key,
- params->priority,
- params->port_id);
-
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
-}
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- add_string, "add");
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_ipv4_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- ipv4_string, "ipv4");
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_priority =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, priority,
- INT32);
-
-cmdline_parse_token_ipaddr_t cmd_firewall_add_ipv4_src_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_add_ipv4_result, src_ip);
+ char *tokens[17];
+ uint32_t n_tokens = RTE_DIM(tokens);
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, src_ip_mask,
- UINT32);
-
-cmdline_parse_token_ipaddr_t cmd_firewall_add_ipv4_dst_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_add_ipv4_result, dst_ip);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, dst_ip_mask,
- UINT32);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- src_port_from, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- src_port_to, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- dst_port_from, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- dst_port_to, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- proto, UINT8);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_proto_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- proto_mask, UINT8);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- port_id, UINT8);
-
-cmdline_parse_inst_t cmd_firewall_add_ipv4 = {
- .f = cmd_firewall_add_ipv4_parsed,
- .data = NULL,
- .help_str = "Firewall rule add",
- .tokens = {
- (void *) &cmd_firewall_add_ipv4_p_string,
- (void *) &cmd_firewall_add_ipv4_pipeline_id,
- (void *) &cmd_firewall_add_ipv4_firewall_string,
- (void *) &cmd_firewall_add_ipv4_add_string,
- (void *) &cmd_firewall_add_ipv4_ipv4_string,
- (void *) &cmd_firewall_add_ipv4_priority,
- (void *) &cmd_firewall_add_ipv4_src_ip,
- (void *) &cmd_firewall_add_ipv4_src_ip_mask,
- (void *) &cmd_firewall_add_ipv4_dst_ip,
- (void *) &cmd_firewall_add_ipv4_dst_ip_mask,
- (void *) &cmd_firewall_add_ipv4_src_port_from,
- (void *) &cmd_firewall_add_ipv4_src_port_to,
- (void *) &cmd_firewall_add_ipv4_dst_port_from,
- (void *) &cmd_firewall_add_ipv4_dst_port_to,
- (void *) &cmd_firewall_add_ipv4_proto,
- (void *) &cmd_firewall_add_ipv4_proto_mask,
- (void *) &cmd_firewall_add_ipv4_port_id,
- NULL,
- },
-};
-
-/*
- * p firewall del ipv4
- */
-
-struct cmd_firewall_del_ipv4_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t ipv4_string;
- cmdline_ipaddr_t src_ip;
- uint32_t src_ip_mask;
- cmdline_ipaddr_t dst_ip;
- uint32_t dst_ip_mask;
- uint16_t src_port_from;
- uint16_t src_port_to;
- uint16_t dst_port_from;
- uint16_t dst_port_to;
- uint8_t proto;
- uint8_t proto_mask;
-};
-
-static void
-cmd_firewall_del_ipv4_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_del_ipv4_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_firewall_key key;
- int status;
-
- key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
- key.key.ipv4_5tuple.src_ip = rte_bswap32(
- (uint32_t) params->src_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.src_ip_mask = params->src_ip_mask;
- key.key.ipv4_5tuple.dst_ip = rte_bswap32(
- (uint32_t) params->dst_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.dst_ip_mask = params->dst_ip_mask;
- key.key.ipv4_5tuple.src_port_from = params->src_port_from;
- key.key.ipv4_5tuple.src_port_to = params->src_port_to;
- key.key.ipv4_5tuple.dst_port_from = params->dst_port_from;
- key.key.ipv4_5tuple.dst_port_to = params->dst_port_to;
- key.key.ipv4_5tuple.proto = params->proto;
- key.key.ipv4_5tuple.proto_mask = params->proto_mask;
-
- status = app_pipeline_firewall_delete_rule(app,
- params->pipeline_id,
- &key);
-
- if (status != 0) {
- printf("Command failed\n");
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "firewall");
return;
}
-}
-
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- del_string, "del");
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_ipv4_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- ipv4_string, "ipv4");
-
-cmdline_parse_token_ipaddr_t cmd_firewall_del_ipv4_src_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_ip);
-
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_ip_mask,
- UINT32);
-
-cmdline_parse_token_ipaddr_t cmd_firewall_del_ipv4_dst_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_del_ipv4_result, dst_ip);
-
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, dst_ip_mask,
- UINT32);
+ /* firewall add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "priority") == 0)) {
+ struct pipeline_firewall_key key;
+ uint32_t priority;
+ struct in_addr sipaddr;
+ uint32_t sipdepth;
+ struct in_addr dipaddr;
+ uint32_t dipdepth;
+ uint16_t sport0;
+ uint16_t sport1;
+ uint16_t dport0;
+ uint16_t dport1;
+ uint8_t proto;
+ uint8_t protomask;
+ uint32_t port_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 16) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- src_port_from, UINT16);
+ if (parser_read_uint32(&priority, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "priority");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_port_to,
- UINT16);
+ if (strcmp(tokens[3], "ipv4")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "ipv4");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- dst_port_from, UINT16);
+ if (parse_ipv4_addr(tokens[4], &sipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "sipaddr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- dst_port_to, UINT16);
+ if (parser_read_uint32(&sipdepth, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "sipdepth");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- proto, UINT8);
+ if (parse_ipv4_addr(tokens[6], &dipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "dipaddr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_proto_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, proto_mask,
- UINT8);
+ if (parser_read_uint32(&dipdepth, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "dipdepth");
+ return;
+ }
-cmdline_parse_inst_t cmd_firewall_del_ipv4 = {
- .f = cmd_firewall_del_ipv4_parsed,
- .data = NULL,
- .help_str = "Firewall rule delete",
- .tokens = {
- (void *) &cmd_firewall_del_ipv4_p_string,
- (void *) &cmd_firewall_del_ipv4_pipeline_id,
- (void *) &cmd_firewall_del_ipv4_firewall_string,
- (void *) &cmd_firewall_del_ipv4_del_string,
- (void *) &cmd_firewall_del_ipv4_ipv4_string,
- (void *) &cmd_firewall_del_ipv4_src_ip,
- (void *) &cmd_firewall_del_ipv4_src_ip_mask,
- (void *) &cmd_firewall_del_ipv4_dst_ip,
- (void *) &cmd_firewall_del_ipv4_dst_ip_mask,
- (void *) &cmd_firewall_del_ipv4_src_port_from,
- (void *) &cmd_firewall_del_ipv4_src_port_to,
- (void *) &cmd_firewall_del_ipv4_dst_port_from,
- (void *) &cmd_firewall_del_ipv4_dst_port_to,
- (void *) &cmd_firewall_del_ipv4_proto,
- (void *) &cmd_firewall_del_ipv4_proto_mask,
- NULL,
- },
-};
+ if (parser_read_uint16(&sport0, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "sport0");
+ return;
+ }
-/*
- * p firewall add bulk
- */
+ if (parser_read_uint16(&sport1, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "sport1");
+ return;
+ }
-struct cmd_firewall_add_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t bulk_string;
- cmdline_fixed_string_t file_path;
-};
+ if (parser_read_uint16(&dport0, tokens[10])) {
+ printf(CMD_MSG_INVALID_ARG, "dport0");
+ return;
+ }
-static void
-cmd_firewall_add_bulk_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_add_bulk_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (parser_read_uint16(&dport1, tokens[11])) {
+ printf(CMD_MSG_INVALID_ARG, "dport1");
+ return;
+ }
- struct app_pipeline_add_bulk_params add_bulk_params;
+ if (parser_read_uint8(&proto, tokens[12])) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
- status = app_pipeline_add_bulk_parse_file(params->file_path, &add_bulk_params);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ if (parser_read_uint8_hex(&protomask, tokens[13])) {
+ printf(CMD_MSG_INVALID_ARG, "protomask");
+ return;
+ }
- status = app_pipeline_firewall_add_bulk(app, params->pipeline_id, add_bulk_params.keys,
- add_bulk_params.n_keys, add_bulk_params.priorities, add_bulk_params.port_ids);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ if (strcmp(tokens[14], "port")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-end:
- rte_free(add_bulk_params.keys);
- rte_free(add_bulk_params.priorities);
- rte_free(add_bulk_params.port_ids);
-}
+ if (parser_read_uint32(&port_id, tokens[15])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result, p_string,
- "p");
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = sipdepth;
+ key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ key.key.ipv4_5tuple.src_port_from = sport0;
+ key.key.ipv4_5tuple.src_port_to = sport1;
+ key.key.ipv4_5tuple.dst_port_from = dport0;
+ key.key.ipv4_5tuple.dst_port_to = dport1;
+ key.key.ipv4_5tuple.proto = proto;
+ key.key.ipv4_5tuple.proto_mask = protomask;
+
+ status = app_pipeline_firewall_add_rule(app,
+ params->pipeline_id,
+ &key,
+ priority,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add");
-cmdline_parse_token_num_t cmd_firewall_add_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_bulk_result, pipeline_id,
- UINT32);
+ return;
+ } /* firewall add */
+
+ /* firewall add bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_firewall_key *keys;
+ uint32_t *priorities, *port_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add bulk");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_bulk_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- firewall_string, "firewall");
+ filename = tokens[2];
-cmdline_parse_token_string_t cmd_firewall_add_bulk_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- add_string, "add");
+ n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
+ if (keys == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
+ memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
-cmdline_parse_token_string_t cmd_firewall_add_bulk_bulk_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- bulk_string, "bulk");
+ priorities = malloc(n_keys * sizeof(uint32_t));
+ if (priorities == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_bulk_file_path_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- file_path, NULL);
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(priorities);
+ free(keys);
+ return;
+ }
-cmdline_parse_inst_t cmd_firewall_add_bulk = {
- .f = cmd_firewall_add_bulk_parsed,
- .data = NULL,
- .help_str = "Firewall rule add bulk",
- .tokens = {
- (void *) &cmd_firewall_add_bulk_p_string,
- (void *) &cmd_firewall_add_bulk_pipeline_id,
- (void *) &cmd_firewall_add_bulk_firewall_string,
- (void *) &cmd_firewall_add_bulk_add_string,
- (void *) &cmd_firewall_add_bulk_bulk_string,
- (void *) &cmd_firewall_add_bulk_file_path_string,
- NULL,
- },
-};
+ status = app_pipeline_firewall_load_file(filename,
+ keys,
+ priorities,
+ port_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(port_ids);
+ free(priorities);
+ free(keys);
+ return;
+ }
-/*
- * p firewall del bulk
- */
+ status = app_pipeline_firewall_add_bulk(app,
+ params->pipeline_id,
+ keys,
+ n_keys,
+ priorities,
+ port_ids);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add bulk");
+
+ free(keys);
+ free(priorities);
+ free(port_ids);
+ return;
+ } /* firewall add bulk */
-struct cmd_firewall_del_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t bulk_string;
- cmdline_fixed_string_t file_path;
-};
+ /* firewall add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-static void
-cmd_firewall_del_bulk_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_del_bulk_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add default");
+ return;
+ }
- struct app_pipeline_del_bulk_params del_bulk_params;
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- status = app_pipeline_del_bulk_parse_file(params->file_path, &del_bulk_params);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ status = app_pipeline_firewall_add_default_rule(app,
+ params->pipeline_id,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add default");
- status = app_pipeline_firewall_delete_bulk(app, params->pipeline_id,
- del_bulk_params.keys, del_bulk_params.n_keys);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ return;
+ } /* firewall add default */
+
+ /* firewall del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0)) {
+ struct pipeline_firewall_key key;
+ struct in_addr sipaddr;
+ uint32_t sipdepth;
+ struct in_addr dipaddr;
+ uint32_t dipdepth;
+ uint16_t sport0;
+ uint16_t sport1;
+ uint16_t dport0;
+ uint16_t dport1;
+ uint8_t proto;
+ uint8_t protomask;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 12) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del");
+ return;
+ }
-end:
- rte_free(del_bulk_params.keys);
-}
+ if (parse_ipv4_addr(tokens[2], &sipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "sipaddr");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result, p_string,
- "p");
+ if (parser_read_uint32(&sipdepth, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "sipdepth");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_bulk_result, pipeline_id,
- UINT32);
+ if (parse_ipv4_addr(tokens[4], &dipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "dipaddr");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- firewall_string, "firewall");
+ if (parser_read_uint32(&dipdepth, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "dipdepth");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- del_string, "del");
+ if (parser_read_uint16(&sport0, tokens[6])) {
+ printf(CMD_MSG_INVALID_ARG, "sport0");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_bulk_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- bulk_string, "bulk");
+ if (parser_read_uint16(&sport1, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "sport1");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_file_path_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- file_path, NULL);
+ if (parser_read_uint16(&dport0, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "dport0");
+ return;
+ }
-cmdline_parse_inst_t cmd_firewall_del_bulk = {
- .f = cmd_firewall_del_bulk_parsed,
- .data = NULL,
- .help_str = "Firewall rule del bulk",
- .tokens = {
- (void *) &cmd_firewall_del_bulk_p_string,
- (void *) &cmd_firewall_del_bulk_pipeline_id,
- (void *) &cmd_firewall_del_bulk_firewall_string,
- (void *) &cmd_firewall_del_bulk_add_string,
- (void *) &cmd_firewall_del_bulk_bulk_string,
- (void *) &cmd_firewall_del_bulk_file_path_string,
- NULL,
- },
-};
+ if (parser_read_uint16(&dport1, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "dport1");
+ return;
+ }
-/*
- * p firewall add default
- */
-struct cmd_firewall_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint8_t port_id;
-};
+ if (parser_read_uint8(&proto, tokens[10])) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-static void
-cmd_firewall_add_default_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_add_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (parser_read_uint8_hex(&protomask, tokens[11])) {
+ printf(CMD_MSG_INVALID_ARG, "protomask");
+ return;
+ }
- status = app_pipeline_firewall_add_default_rule(app,
- params->pipeline_id,
- params->port_id);
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = sipdepth;
+ key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ key.key.ipv4_5tuple.src_port_from = sport0;
+ key.key.ipv4_5tuple.src_port_to = sport1;
+ key.key.ipv4_5tuple.dst_port_from = dport0;
+ key.key.ipv4_5tuple.dst_port_to = dport1;
+ key.key.ipv4_5tuple.proto = proto;
+ key.key.ipv4_5tuple.proto_mask = protomask;
+
+ status = app_pipeline_firewall_delete_rule(app,
+ params->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-cmdline_parse_token_string_t cmd_firewall_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_firewall_add_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_default_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_add_default_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- add_string, "add");
+ } /* firewall del */
+
+ /* firewall del bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_firewall_key *keys;
+ uint32_t *priorities, *port_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del bulk");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- default_string, "default");
+ filename = tokens[2];
-cmdline_parse_token_num_t cmd_firewall_add_default_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_default_result, port_id,
- UINT8);
+ n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
+ if (keys == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
+ memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
-cmdline_parse_inst_t cmd_firewall_add_default = {
- .f = cmd_firewall_add_default_parsed,
- .data = NULL,
- .help_str = "Firewall default rule add",
- .tokens = {
- (void *) &cmd_firewall_add_default_p_string,
- (void *) &cmd_firewall_add_default_pipeline_id,
- (void *) &cmd_firewall_add_default_firewall_string,
- (void *) &cmd_firewall_add_default_add_string,
- (void *) &cmd_firewall_add_default_default_string,
- (void *) &cmd_firewall_add_default_port_id,
- NULL,
- },
-};
+ priorities = malloc(n_keys * sizeof(uint32_t));
+ if (priorities == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(keys);
+ return;
+ }
-/*
- * p firewall del default
- */
-struct cmd_firewall_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(priorities);
+ free(keys);
+ return;
+ }
-static void
-cmd_firewall_del_default_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_del_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ status = app_pipeline_firewall_load_file(filename,
+ keys,
+ priorities,
+ port_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(port_ids);
+ free(priorities);
+ free(keys);
+ return;
+ }
- status = app_pipeline_firewall_delete_default_rule(app,
- params->pipeline_id);
+ status = app_pipeline_firewall_delete_bulk(app,
+ params->pipeline_id,
+ keys,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del bulk");
- if (status != 0) {
- printf("Command failed\n");
+ free(port_ids);
+ free(priorities);
+ free(keys);
return;
- }
-}
-
-cmdline_parse_token_string_t cmd_firewall_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_firewall_del_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_default_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_del_default_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- del_string, "del");
+ } /* firewall del bulk */
+
+ /* firewall del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del default");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- default_string, "default");
+ status = app_pipeline_firewall_delete_default_rule(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del default");
-cmdline_parse_inst_t cmd_firewall_del_default = {
- .f = cmd_firewall_del_default_parsed,
- .data = NULL,
- .help_str = "Firewall default rule delete",
- .tokens = {
- (void *) &cmd_firewall_del_default_p_string,
- (void *) &cmd_firewall_del_default_pipeline_id,
- (void *) &cmd_firewall_del_default_firewall_string,
- (void *) &cmd_firewall_del_default_del_string,
- (void *) &cmd_firewall_del_default_default_string,
- NULL,
- },
-};
+ return;
-/*
- * p firewall ls
- */
+ } /* firewall del default */
-struct cmd_firewall_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_firewall_ls_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ /* firewall ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall ls");
+ return;
+ }
- status = app_pipeline_firewall_ls(app, params->pipeline_id);
+ status = app_pipeline_firewall_ls(app, params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall ls");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
+ } /* firewall ls */
+
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall");
}
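
The refactor above collapses seven separate cmdline parsers into one handler: the cmdline layer still matches the fixed "p <pipelineid> firewall" prefix, and everything after it arrives as a single multi string that cmd_firewall_parsed() tokenizes and dispatches on its leading keywords. The following is a minimal standalone sketch of that tokenize-then-dispatch shape only; tokenize() here is a hypothetical stand-in for parse_tokenize_string() (defined in parser.c, not shown in this hunk), not the DPDK implementation.

/*
 * Sketch: split the command tail into tokens, then branch on the
 * leading keywords, mirroring the structure of cmd_firewall_parsed().
 */
#include <stdio.h>
#include <string.h>

static int
tokenize(char *s, char **tokens, unsigned int *n_tokens)
{
	unsigned int i = 0, max = *n_tokens;
	char *tok;

	for (tok = strtok(s, " \t\n"); tok != NULL; tok = strtok(NULL, " \t\n")) {
		if (i == max)
			return -1; /* too many arguments */
		tokens[i++] = tok;
	}

	*n_tokens = i;
	return 0;
}

int
main(void)
{
	char line[] = "add default 3"; /* tail of "p 1 firewall add default 3" */
	char *tokens[17];
	unsigned int n_tokens = sizeof(tokens) / sizeof(tokens[0]);

	if (tokenize(line, tokens, &n_tokens)) {
		printf("too many arguments\n");
		return 1;
	}

	if ((n_tokens == 3) &&
	    (strcmp(tokens[0], "add") == 0) &&
	    (strcmp(tokens[1], "default") == 0)) {
		printf("dispatch: firewall add default, port %s\n", tokens[2]);
		return 0;
	}

	printf("unknown firewall sub-command\n");
	return 1;
}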
-cmdline_parse_token_string_t cmd_firewall_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result, p_string,
- "p");
+static cmdline_parse_token_string_t cmd_firewall_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, p_string, "p");
-cmdline_parse_token_num_t cmd_firewall_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_ls_result, pipeline_id,
- UINT32);
+static cmdline_parse_token_num_t cmd_firewall_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_firewall_ls_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result,
- firewall_string, "firewall");
+static cmdline_parse_token_string_t cmd_firewall_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, firewall_string,
+ "firewall");
-cmdline_parse_token_string_t cmd_firewall_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_firewall_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_firewall_ls = {
- .f = cmd_firewall_ls_parsed,
+static cmdline_parse_inst_t cmd_firewall = {
+ .f = cmd_firewall_parsed,
.data = NULL,
- .help_str = "Firewall rule list",
+ .help_str = "firewall add / add bulk / add default / del / del bulk"
+ " / del default / ls",
.tokens = {
- (void *) &cmd_firewall_ls_p_string,
- (void *) &cmd_firewall_ls_pipeline_id,
- (void *) &cmd_firewall_ls_firewall_string,
- (void *) &cmd_firewall_ls_ls_string,
+ (void *) &cmd_firewall_p_string,
+ (void *) &cmd_firewall_pipeline_id,
+ (void *) &cmd_firewall_firewall_string,
+ (void *) &cmd_firewall_multi_string,
NULL,
},
};
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_firewall_add_ipv4,
- (cmdline_parse_inst_t *) &cmd_firewall_del_ipv4,
- (cmdline_parse_inst_t *) &cmd_firewall_add_bulk,
- (cmdline_parse_inst_t *) &cmd_firewall_del_bulk,
- (cmdline_parse_inst_t *) &cmd_firewall_add_default,
- (cmdline_parse_inst_t *) &cmd_firewall_del_default,
- (cmdline_parse_inst_t *) &cmd_firewall_ls,
+ (cmdline_parse_inst_t *) &cmd_firewall,
NULL,
};
static struct pipeline_fe_ops pipeline_firewall_fe_ops = {
.f_init = app_pipeline_firewall_init,
+ .f_post_init = NULL,
.f_free = app_pipeline_firewall_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
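
One detail of the new front end worth calling out: addresses are now parsed with parse_ipv4_addr() into a struct in_addr (network byte order) and converted with the endianness-aware rte_be_to_cpu_32() before being stored in the 5-tuple key, replacing the unconditional rte_bswap32() applied to the old cmdline-parsed value. A minimal sketch of that network-to-host conversion using only libc, with ntohl() assumed as the portable analogue of rte_be_to_cpu_32() on the key fields:

/*
 * Sketch only: the 5-tuple key stores IPv4 addresses in host byte order,
 * so the network-order value produced by the address parser has to be
 * converted; ntohl() plays the role of rte_be_to_cpu_32() here.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int
main(void)
{
	struct in_addr a;
	uint32_t host_order;

	if (inet_pton(AF_INET, "10.0.0.1", &a) != 1)
		return 1;

	host_order = ntohl(a.s_addr);                  /* network -> host order */
	printf("0x%08x\n", (unsigned int)host_order);  /* prints 0x0a000001 */
	return 0;
}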
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.h b/examples/ip_pipeline/pipeline/pipeline_firewall.h
index ccc4e64b..aa79a2a0 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall.h
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall.h
@@ -72,6 +72,18 @@ int
app_pipeline_firewall_delete_default_rule(struct app_params *app,
uint32_t pipeline_id);
+#ifndef APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE
+#define APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE 65536
+#endif
+
+int
+app_pipeline_firewall_load_file(char *filename,
+ struct pipeline_firewall_key *keys,
+ uint32_t *priorities,
+ uint32_t *port_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
extern struct pipeline_type pipeline_firewall;
#endif
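
The new app_pipeline_firewall_load_file() declaration follows an in/out-count contract: the caller sizes its arrays to APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE, passes that capacity in *n_keys, and gets back either the number of rules actually read or, on failure, the offending line number in *line (which the bulk commands report via CMD_MSG_FILE_ERR). Below is a cut-down sketch of that contract with an invented one-integer-per-line format rather than the real rule syntax; the real loader also skips blank and '#' comment lines.

/*
 * Sketch of the load-file contract: *n carries capacity in and count out,
 * *line reports the failing line on error.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int
load_u32_file(const char *filename, uint32_t *vals, uint32_t *n, uint32_t *line)
{
	FILE *f = fopen(filename, "r");
	char buf[256];
	uint32_t i = 0, l;

	if (f == NULL) {
		*line = 0;
		return -1;
	}

	for (l = 1; i < *n; l++) {
		char *end;
		unsigned long v;

		if (fgets(buf, sizeof(buf), f) == NULL)
			break;

		v = strtoul(buf, &end, 0);
		if (end == buf) {        /* not a number: report the bad line */
			*line = l;
			fclose(f);
			return -1;
		}
		vals[i++] = (uint32_t)v;
	}

	*n = i;                          /* capacity in, count out */
	fclose(f);
	return 0;
}

A caller would mirror the bulk commands above: allocate the arrays at maximum capacity, set *n to that capacity, and on a non-zero return print a file-error message naming *line before freeing everything.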
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
index e7a8a4c5..b61f3034 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
@@ -565,27 +565,6 @@ pipeline_firewall_free(void *pipeline)
}
static int
-pipeline_firewall_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_firewall_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -732,7 +711,7 @@ pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
n_keys = req->n_keys;
for (i = 0; i < n_keys; i++) {
- entries[i] = rte_malloc(NULL,
+ entries[i] = rte_zmalloc(NULL,
sizeof(struct firewall_table_entry),
RTE_CACHE_LINE_SIZE);
if (entries[i] == NULL) {
@@ -740,7 +719,7 @@ pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
return rsp;
}
- params[i] = rte_malloc(NULL,
+ params[i] = rte_zmalloc(NULL,
sizeof(struct rte_table_acl_rule_add_params),
RTE_CACHE_LINE_SIZE);
if (params[i] == NULL) {
@@ -814,7 +793,7 @@ pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg)
n_keys = req->n_keys;
for (i = 0; i < n_keys; i++) {
- params[i] = rte_malloc(NULL,
+ params[i] = rte_zmalloc(NULL,
sizeof(struct rte_table_acl_rule_delete_params),
RTE_CACHE_LINE_SIZE);
if (params[i] == NULL) {
@@ -903,5 +882,4 @@ struct pipeline_be_ops pipeline_firewall_be_ops = {
.f_free = pipeline_firewall_free,
.f_run = NULL,
.f_timer = pipeline_firewall_timer,
- .f_track = pipeline_firewall_track,
};
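
In the back-end handlers, the switch from rte_malloc() to rte_zmalloc() means the per-rule ACL parameter structures start out zero-filled, so any field a handler does not set explicitly is well defined instead of indeterminate. The sketch below shows the same distinction in plain C, with calloc() standing in for rte_zmalloc() and a made-up rule_params struct in place of rte_table_acl_rule_add_params:

/*
 * Sketch: zero-filled allocation versus uninitialized allocation.
 */
#include <stdio.h>
#include <stdlib.h>

struct rule_params {            /* stand-in for the ACL rule parameters */
	int priority;
	unsigned char proto;
	unsigned char proto_mask;
};

int
main(void)
{
	struct rule_params *a = malloc(sizeof(*a));    /* contents indeterminate */
	struct rule_params *b = calloc(1, sizeof(*b)); /* all fields zero */

	if ((a == NULL) || (b == NULL))
		return 1;

	a->priority = 1;   /* proto/proto_mask left uninitialized */
	b->priority = 1;   /* proto/proto_mask are guaranteed 0 */

	printf("b->proto=%u b->proto_mask=%u\n", b->proto, b->proto_mask);

	free(a);
	free(b);
	return 0;
}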
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
index 4012121f..bf12fd7b 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
@@ -35,6 +35,7 @@
#include <string.h>
#include <sys/queue.h>
#include <netinet/in.h>
+#include <unistd.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -43,13 +44,12 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_flow_actions.h"
#include "hash_func.h"
+#include "parser.h"
/*
* Flow actions pipeline
@@ -689,1121 +689,620 @@ app_pipeline_fa_dscp_ls(struct app_params *app,
return 0;
}
-/*
- * Flow meter configuration (single flow)
- *
- * p <pipeline ID> flow <flow ID> meter <meter ID> trtcm <trtcm params>
- */
-
-struct cmd_fa_meter_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t meter_string;
- uint32_t meter_id;
- cmdline_fixed_string_t trtcm_string;
- uint64_t cir;
- uint64_t pir;
- uint64_t cbs;
- uint64_t pbs;
-};
-
-static void
-cmd_fa_meter_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_meter_config_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_params;
- int status;
-
- if (params->meter_id >= PIPELINE_FA_N_TC_MAX) {
- printf("Command failed\n");
- return;
- }
-
- flow_params.m[params->meter_id].cir = params->cir;
- flow_params.m[params->meter_id].pir = params->pir;
- flow_params.m[params->meter_id].cbs = params->cbs;
- flow_params.m[params->meter_id].pbs = params->pbs;
-
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- params->flow_id,
- 1 << params->meter_id,
- 0,
- 0,
- &flow_params);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fa_meter_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- flow_string, "flow");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
- flow_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_meter_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- meter_string, "meter");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_meter_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
- meter_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_trtcm_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- trtcm_string, "trtcm");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_cir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, cir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_pir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, pir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_cbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, cbs, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_pbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, pbs, UINT64);
-
-cmdline_parse_inst_t cmd_fa_meter_config = {
- .f = cmd_fa_meter_config_parsed,
- .data = NULL,
- .help_str = "Flow meter configuration (single flow) ",
- .tokens = {
- (void *) &cmd_fa_meter_config_p_string,
- (void *) &cmd_fa_meter_config_pipeline_id,
- (void *) &cmd_fa_meter_config_flow_string,
- (void *) &cmd_fa_meter_config_flow_id,
- (void *) &cmd_fa_meter_config_meter_string,
- (void *) &cmd_fa_meter_config_meter_id,
- (void *) &cmd_fa_meter_config_trtcm_string,
- (void *) &cmd_fa_meter_config_cir,
- (void *) &cmd_fa_meter_config_pir,
- (void *) &cmd_fa_meter_config_cbs,
- (void *) &cmd_fa_meter_config_pbs,
- NULL,
- },
-};
-
-/*
- * Flow meter configuration (multiple flows)
- *
- * p <pipeline ID> flows <n_flows> meter <meter ID> trtcm <trtcm params>
- */
-
-struct cmd_fa_meter_config_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flows_string;
- uint32_t n_flows;
- cmdline_fixed_string_t meter_string;
- uint32_t meter_id;
- cmdline_fixed_string_t trtcm_string;
- uint64_t cir;
- uint64_t pir;
- uint64_t cbs;
- uint64_t pbs;
-};
-
-static void
-cmd_fa_meter_config_bulk_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
+int
+app_pipeline_fa_load_file(char *filename,
+ uint32_t *flow_ids,
+ struct pipeline_fa_flow_params *p,
+ uint32_t *n_flows,
+ uint32_t *line)
{
- struct cmd_fa_meter_config_bulk_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_template, *flow_params;
- uint32_t *flow_id;
- uint32_t i;
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
- if ((params->n_flows == 0) ||
- (params->meter_id >= PIPELINE_FA_N_TC_MAX)) {
- printf("Invalid arguments\n");
- return;
- }
-
- flow_id = (uint32_t *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (flow_ids == NULL) ||
+ (p == NULL) ||
+ (n_flows == NULL) ||
+ (*n_flows == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
- flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
- RTE_CACHE_LINE_SIZE);
- if (flow_params == NULL) {
- rte_free(flow_id);
- printf("Memory allocation failed\n");
- return;
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
}
- memset(&flow_template, 0, sizeof(flow_template));
- flow_template.m[params->meter_id].cir = params->cir;
- flow_template.m[params->meter_id].pir = params->pir;
- flow_template.m[params->meter_id].cbs = params->cbs;
- flow_template.m[params->meter_id].pbs = params->pbs;
+ /* Read file */
+ for (i = 0, l = 1; i < *n_flows; l++) {
+ char *tokens[64];
+ uint32_t n_tokens = RTE_DIM(tokens);
- for (i = 0; i < params->n_flows; i++) {
- uint32_t pos = i % N_FLOWS_BULK;
+ int status;
- flow_id[pos] = i;
- memcpy(&flow_params[pos],
- &flow_template,
- sizeof(flow_template));
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
- if ((pos == N_FLOWS_BULK - 1) ||
- (i == params->n_flows - 1)) {
- int status;
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
- status = app_pipeline_fa_flow_config_bulk(app,
- params->pipeline_id,
- flow_id,
- pos + 1,
- 1 << params->meter_id,
- 0,
- 0,
- flow_params);
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
- if (status != 0) {
- printf("Command failed\n");
- break;
- }
- }
+ if ((n_tokens != 64) ||
+ /* flow */
+ strcmp(tokens[0], "flow") ||
+ parser_read_uint32(&flow_ids[i], tokens[1]) ||
+
+ /* meter & policer 0 */
+ strcmp(tokens[2], "meter") ||
+ strcmp(tokens[3], "0") ||
+ strcmp(tokens[4], "trtcm") ||
+ parser_read_uint64(&p[i].m[0].cir, tokens[5]) ||
+ parser_read_uint64(&p[i].m[0].pir, tokens[6]) ||
+ parser_read_uint64(&p[i].m[0].cbs, tokens[7]) ||
+ parser_read_uint64(&p[i].m[0].pbs, tokens[8]) ||
+ strcmp(tokens[9], "policer") ||
+ strcmp(tokens[10], "0") ||
+ strcmp(tokens[11], "g") ||
+ string_to_policer_action(tokens[12],
+ &p[i].p[0].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[13], "y") ||
+ string_to_policer_action(tokens[14],
+ &p[i].p[0].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[15], "r") ||
+ string_to_policer_action(tokens[16],
+ &p[i].p[0].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 1 */
+ strcmp(tokens[17], "meter") ||
+ strcmp(tokens[18], "1") ||
+ strcmp(tokens[19], "trtcm") ||
+ parser_read_uint64(&p[i].m[1].cir, tokens[20]) ||
+ parser_read_uint64(&p[i].m[1].pir, tokens[21]) ||
+ parser_read_uint64(&p[i].m[1].cbs, tokens[22]) ||
+ parser_read_uint64(&p[i].m[1].pbs, tokens[23]) ||
+ strcmp(tokens[24], "policer") ||
+ strcmp(tokens[25], "1") ||
+ strcmp(tokens[26], "g") ||
+ string_to_policer_action(tokens[27],
+ &p[i].p[1].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[28], "y") ||
+ string_to_policer_action(tokens[29],
+ &p[i].p[1].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[30], "r") ||
+ string_to_policer_action(tokens[31],
+ &p[i].p[1].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 2 */
+ strcmp(tokens[32], "meter") ||
+ strcmp(tokens[33], "2") ||
+ strcmp(tokens[34], "trtcm") ||
+ parser_read_uint64(&p[i].m[2].cir, tokens[35]) ||
+ parser_read_uint64(&p[i].m[2].pir, tokens[36]) ||
+ parser_read_uint64(&p[i].m[2].cbs, tokens[37]) ||
+ parser_read_uint64(&p[i].m[2].pbs, tokens[38]) ||
+ strcmp(tokens[39], "policer") ||
+ strcmp(tokens[40], "2") ||
+ strcmp(tokens[41], "g") ||
+ string_to_policer_action(tokens[42],
+ &p[i].p[2].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[43], "y") ||
+ string_to_policer_action(tokens[44],
+ &p[i].p[2].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[45], "r") ||
+ string_to_policer_action(tokens[46],
+ &p[i].p[2].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 3 */
+ strcmp(tokens[47], "meter") ||
+ strcmp(tokens[48], "3") ||
+ strcmp(tokens[49], "trtcm") ||
+ parser_read_uint64(&p[i].m[3].cir, tokens[50]) ||
+ parser_read_uint64(&p[i].m[3].pir, tokens[51]) ||
+ parser_read_uint64(&p[i].m[3].cbs, tokens[52]) ||
+ parser_read_uint64(&p[i].m[3].pbs, tokens[53]) ||
+ strcmp(tokens[54], "policer") ||
+ strcmp(tokens[55], "3") ||
+ strcmp(tokens[56], "g") ||
+ string_to_policer_action(tokens[57],
+ &p[i].p[3].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[58], "y") ||
+ string_to_policer_action(tokens[59],
+ &p[i].p[3].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[60], "r") ||
+ string_to_policer_action(tokens[61],
+ &p[i].p[3].action[e_RTE_METER_RED]) ||
+
+ /* port */
+ strcmp(tokens[62], "port") ||
+ parser_read_uint32(&p[i].port_id, tokens[63]))
+ goto error1;
+
+ i++;
}
- rte_free(flow_params);
- rte_free(flow_id);
+ /* Close file */
+ *n_flows = i;
+ fclose(f);
+ return 0;
+error1:
+ *line = l;
+ fclose(f);
+ return -1;
}
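
app_pipeline_fa_load_file() above validates each record by chaining every token check into a single || expression, so the first mismatch rejects the line and *line records where parsing stopped. The sketch below applies the same idiom to a deliberately tiny, invented two-field record ("flow <id> port <portid>") rather than the real 64-token layout, with read_u32() as a hypothetical stand-in for parser_read_uint32():

/*
 * Sketch: validate a fixed token layout with one chained expression.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static int
read_u32(uint32_t *out, const char *s)
{
	char *end;
	unsigned long v = strtoul(s, &end, 0);

	if ((end == s) || (*end != '\0'))
		return -1;
	*out = (uint32_t)v;
	return 0;
}

int
main(void)
{
	const char *tokens[4] = { "flow", "7", "port", "2" };
	uint32_t n_tokens = 4, flow_id, port_id;

	if ((n_tokens != 4) ||
	    strcmp(tokens[0], "flow") ||
	    read_u32(&flow_id, tokens[1]) ||
	    strcmp(tokens[2], "port") ||
	    read_u32(&port_id, tokens[3])) {
		printf("malformed record\n");
		return 1;
	}

	printf("flow %u -> port %u\n", flow_id, port_id);
	return 0;
}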
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_flows_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- flows_string, "flows");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- n_flows, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_meter_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- meter_string, "meter");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_meter_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- meter_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_trtcm_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- trtcm_string, "trtcm");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_cir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- cir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- pir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_cbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- cbs, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- pbs, UINT64);
-
-cmdline_parse_inst_t cmd_fa_meter_config_bulk = {
- .f = cmd_fa_meter_config_bulk_parsed,
- .data = NULL,
- .help_str = "Flow meter configuration (multiple flows)",
- .tokens = {
- (void *) &cmd_fa_meter_config_bulk_p_string,
- (void *) &cmd_fa_meter_config_bulk_pipeline_id,
- (void *) &cmd_fa_meter_config_bulk_flows_string,
- (void *) &cmd_fa_meter_config_bulk_n_flows,
- (void *) &cmd_fa_meter_config_bulk_meter_string,
- (void *) &cmd_fa_meter_config_bulk_meter_id,
- (void *) &cmd_fa_meter_config_bulk_trtcm_string,
- (void *) &cmd_fa_meter_config_cir,
- (void *) &cmd_fa_meter_config_pir,
- (void *) &cmd_fa_meter_config_cbs,
- (void *) &cmd_fa_meter_config_pbs,
- NULL,
- },
-};
-
/*
- * Flow policer configuration (single flow)
+ * action
*
- * p <pipeline ID> flow <flow ID> policer <policer ID>
- * G <action> Y <action> R <action>
+ * flow meter, policer and output port configuration:
+ * p <pipelineid> action flow <flowid> meter <meterid> trtcm <cir> <pir> <cbs> <pbs>
*
- * <action> = G (green) | Y (yellow) | R (red) | D (drop)
- */
-
-struct cmd_fa_policer_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t policer_string;
- uint32_t policer_id;
- cmdline_fixed_string_t green_string;
- cmdline_fixed_string_t g_action;
- cmdline_fixed_string_t yellow_string;
- cmdline_fixed_string_t y_action;
- cmdline_fixed_string_t red_string;
- cmdline_fixed_string_t r_action;
-};
-
-static void
-cmd_fa_policer_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_policer_config_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_params;
- int status;
-
- if (params->policer_id >= PIPELINE_FA_N_TC_MAX) {
- printf("Command failed\n");
- return;
- }
-
- status = string_to_policer_action(params->g_action,
- &flow_params.p[params->policer_id].action[e_RTE_METER_GREEN]);
- if (status)
- printf("Invalid policer green action\n");
-
- status = string_to_policer_action(params->y_action,
- &flow_params.p[params->policer_id].action[e_RTE_METER_YELLOW]);
- if (status)
- printf("Invalid policer yellow action\n");
-
- status = string_to_policer_action(params->r_action,
- &flow_params.p[params->policer_id].action[e_RTE_METER_RED]);
- if (status)
- printf("Invalid policer red action\n");
-
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- params->flow_id,
- 0,
- 1 << params->policer_id,
- 0,
- &flow_params);
-
- if (status != 0)
- printf("Command failed\n");
-
-}
-
-cmdline_parse_token_string_t cmd_fa_policer_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- flow_string, "flow");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
- flow_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_policer_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- policer_string, "policer");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_policer_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
- policer_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_green_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- green_string, "G");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_g_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- g_action, "R#Y#G#D");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_yellow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- yellow_string, "Y");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_y_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- y_action, "R#Y#G#D");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_red_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- red_string, "R");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_r_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- r_action, "R#Y#G#D");
-
-cmdline_parse_inst_t cmd_fa_policer_config = {
- .f = cmd_fa_policer_config_parsed,
- .data = NULL,
- .help_str = "Flow policer configuration (single flow)",
- .tokens = {
- (void *) &cmd_fa_policer_config_p_string,
- (void *) &cmd_fa_policer_config_pipeline_id,
- (void *) &cmd_fa_policer_config_flow_string,
- (void *) &cmd_fa_policer_config_flow_id,
- (void *) &cmd_fa_policer_config_policer_string,
- (void *) &cmd_fa_policer_config_policer_id,
- (void *) &cmd_fa_policer_config_green_string,
- (void *) &cmd_fa_policer_config_g_action,
- (void *) &cmd_fa_policer_config_yellow_string,
- (void *) &cmd_fa_policer_config_y_action,
- (void *) &cmd_fa_policer_config_red_string,
- (void *) &cmd_fa_policer_config_r_action,
- NULL,
- },
-};
-
-/*
- * Flow policer configuration (multiple flows)
+ * p <pipelineid> action flow <flowid> policer <policerid> g <gaction> y <yaction> r <raction>
+ * <action> is one of the following:
+ * G = recolor to green
+ * Y = recolor to yellow
+ * R = recolor to red
+ * D = drop
*
- * p <pipeline ID> flows <n_flows> policer <policer ID>
- * G <action> Y <action> R <action>
+ * p <pipelineid> action flow <flowid> port <port ID>
*
- * <action> = G (green) | Y (yellow) | R (red) | D (drop)
- */
+ * p <pipelineid> action flow bulk <file>
+ *
+ * flow policer stats read:
+ * p <pipelineid> action flow <flowid> stats
+ *
+ * flow ls:
+ * p <pipelineid> action flow ls
+ *
+ * dscp table configuration:
+ * p <pipelineid> action dscp <dscpid> class <class ID> color <color>
+ *
+ * dscp table ls:
+ * p <pipelineid> action dscp ls
+**/
-struct cmd_fa_policer_config_bulk_result {
+struct cmd_action_result {
cmdline_fixed_string_t p_string;
uint32_t pipeline_id;
- cmdline_fixed_string_t flows_string;
- uint32_t n_flows;
- cmdline_fixed_string_t policer_string;
- uint32_t policer_id;
- cmdline_fixed_string_t green_string;
- cmdline_fixed_string_t g_action;
- cmdline_fixed_string_t yellow_string;
- cmdline_fixed_string_t y_action;
- cmdline_fixed_string_t red_string;
- cmdline_fixed_string_t r_action;
+ cmdline_fixed_string_t action_string;
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_fa_policer_config_bulk_parsed(
+cmd_action_parsed(
void *parsed_result,
__rte_unused struct cmdline *cl,
void *data)
{
- struct cmd_fa_policer_config_bulk_result *params = parsed_result;
+ struct cmd_action_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_fa_flow_params flow_template, *flow_params;
- uint32_t *flow_id, i;
- int status;
- if ((params->n_flows == 0) ||
- (params->policer_id >= PIPELINE_FA_N_TC_MAX)) {
- printf("Invalid arguments\n");
- return;
- }
-
- flow_id = (uint32_t *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
- flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
- RTE_CACHE_LINE_SIZE);
- if (flow_params == NULL) {
- rte_free(flow_id);
- printf("Memory allocation failed\n");
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "action");
return;
}
- memset(&flow_template, 0, sizeof(flow_template));
-
- status = string_to_policer_action(params->g_action,
- &flow_template.p[params->policer_id].action[e_RTE_METER_GREEN]);
- if (status)
- printf("Invalid policer green action\n");
-
- status = string_to_policer_action(params->y_action,
- &flow_template.p[params->policer_id].action[e_RTE_METER_YELLOW]);
- if (status)
- printf("Invalid policer yellow action\n");
-
- status = string_to_policer_action(params->r_action,
- &flow_template.p[params->policer_id].action[e_RTE_METER_RED]);
- if (status)
- printf("Invalid policer red action\n");
-
- for (i = 0; i < params->n_flows; i++) {
- uint32_t pos = i % N_FLOWS_BULK;
-
- flow_id[pos] = i;
- memcpy(&flow_params[pos], &flow_template,
- sizeof(flow_template));
-
- if ((pos == N_FLOWS_BULK - 1) ||
- (i == params->n_flows - 1)) {
- int status;
-
- status = app_pipeline_fa_flow_config_bulk(app,
- params->pipeline_id,
- flow_id,
- pos + 1,
- 0,
- 1 << params->policer_id,
- 0,
- flow_params);
-
- if (status != 0) {
- printf("Command failed\n");
-
- break;
- }
+ /* action flow meter */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "meter") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, meter_id;
+
+ if (n_tokens != 9) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow meter");
+ return;
}
- }
-
- rte_free(flow_params);
- rte_free(flow_id);
-
-}
-
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_flows_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- flows_string, "flows");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_bulk_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- n_flows, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_policer_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- policer_string, "policer");
-cmdline_parse_token_num_t cmd_fa_policer_config_bulk_policer_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- policer_id, UINT32);
+ memset(&flow_params, 0, sizeof(flow_params));
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_green_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- green_string, "G");
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_g_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- g_action, "R#Y#G#D");
+ if (parser_read_uint32(&meter_id, tokens[3]) ||
+ (meter_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf(CMD_MSG_INVALID_ARG, "meterid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_yellow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- yellow_string, "Y");
+ if (strcmp(tokens[4], "trtcm")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "trtcm");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_y_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- y_action, "R#Y#G#D");
+ if (parser_read_uint64(&flow_params.m[meter_id].cir, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "cir");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_red_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- red_string, "R");
+ if (parser_read_uint64(&flow_params.m[meter_id].pir, tokens[6])) {
+ printf(CMD_MSG_INVALID_ARG, "pir");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_r_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- r_action, "R#Y#G#D");
+ if (parser_read_uint64(&flow_params.m[meter_id].cbs, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "cbs");
+ return;
+ }
-cmdline_parse_inst_t cmd_fa_policer_config_bulk = {
- .f = cmd_fa_policer_config_bulk_parsed,
- .data = NULL,
- .help_str = "Flow policer configuration (multiple flows)",
- .tokens = {
- (void *) &cmd_fa_policer_config_bulk_p_string,
- (void *) &cmd_fa_policer_config_bulk_pipeline_id,
- (void *) &cmd_fa_policer_config_bulk_flows_string,
- (void *) &cmd_fa_policer_config_bulk_n_flows,
- (void *) &cmd_fa_policer_config_bulk_policer_string,
- (void *) &cmd_fa_policer_config_bulk_policer_id,
- (void *) &cmd_fa_policer_config_bulk_green_string,
- (void *) &cmd_fa_policer_config_bulk_g_action,
- (void *) &cmd_fa_policer_config_bulk_yellow_string,
- (void *) &cmd_fa_policer_config_bulk_y_action,
- (void *) &cmd_fa_policer_config_bulk_red_string,
- (void *) &cmd_fa_policer_config_bulk_r_action,
- NULL,
- },
-};
+ if (parser_read_uint64(&flow_params.m[meter_id].pbs, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "pbs");
+ return;
+ }
-/*
- * Flow output port configuration (single flow)
- *
- * p <pipeline ID> flow <flow ID> port <port ID>
- */
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 1 << meter_id,
+ 0,
+ 0,
+ &flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow meter");
-struct cmd_fa_output_port_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t port_string;
- uint32_t port_id;
-};
+ return;
+ } /* action flow meter */
+
+ /* action flow policer */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "policer") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, policer_id;
+
+ if (n_tokens != 10) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow policer");
+ return;
+ }
-static void
-cmd_fa_output_port_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_output_port_config_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_params;
- int status;
+ memset(&flow_params, 0, sizeof(flow_params));
- flow_params.port_id = params->port_id;
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- params->flow_id,
- 0,
- 0,
- 1,
- &flow_params);
+ if (parser_read_uint32(&policer_id, tokens[3]) ||
+ (policer_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf(CMD_MSG_INVALID_ARG, "policerid");
+ return;
+ }
- if (status != 0)
- printf("Command failed\n");
-}
+ if (strcmp(tokens[4], "g")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "g");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_output_port_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
- p_string, "p");
+ if (string_to_policer_action(tokens[5],
+ &flow_params.p[policer_id].action[e_RTE_METER_GREEN])) {
+ printf(CMD_MSG_INVALID_ARG, "gaction");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_output_port_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
- pipeline_id, UINT32);
+ if (strcmp(tokens[6], "y")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "y");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_output_port_config_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
- flow_string, "flow");
+ if (string_to_policer_action(tokens[7],
+ &flow_params.p[policer_id].action[e_RTE_METER_YELLOW])) {
+ printf(CMD_MSG_INVALID_ARG, "yaction");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_output_port_config_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
- flow_id, UINT32);
+ if (strcmp(tokens[8], "r")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "r");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_output_port_config_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
- port_string, "port");
+ if (string_to_policer_action(tokens[9],
+ &flow_params.p[policer_id].action[e_RTE_METER_RED])) {
+ printf(CMD_MSG_INVALID_ARG, "raction");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_output_port_config_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
- port_id, UINT32);
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 0,
+ 1 << policer_id,
+ 0,
+ &flow_params);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "action flow policer");
-cmdline_parse_inst_t cmd_fa_output_port_config = {
- .f = cmd_fa_output_port_config_parsed,
- .data = NULL,
- .help_str = "Flow output port configuration (single flow)",
- .tokens = {
- (void *) &cmd_fa_output_port_config_p_string,
- (void *) &cmd_fa_output_port_config_pipeline_id,
- (void *) &cmd_fa_output_port_config_flow_string,
- (void *) &cmd_fa_output_port_config_flow_id,
- (void *) &cmd_fa_output_port_config_port_string,
- (void *) &cmd_fa_output_port_config_port_id,
- NULL,
- },
-};
+ return;
+ } /* action flow policer */
+
+ /* action flow port */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "port") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, port_id;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow port");
+ return;
+ }
-/*
- * Flow output port configuration (multiple flows)
- *
- * p <pipeline ID> flows <n_flows> ports <n_ports>
- */
+ memset(&flow_params, 0, sizeof(flow_params));
-struct cmd_fa_output_port_config_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flows_string;
- uint32_t n_flows;
- cmdline_fixed_string_t ports_string;
- uint32_t n_ports;
-};
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-static void
-cmd_fa_output_port_config_bulk_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_output_port_config_bulk_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params *flow_params;
- uint32_t *flow_id;
- uint32_t i;
+ if (parser_read_uint32(&port_id, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- if (params->n_flows == 0) {
- printf("Invalid arguments\n");
- return;
- }
+ flow_params.port_id = port_id;
- flow_id = (uint32_t *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 0,
+ 0,
+ 1,
+ &flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow port");
- flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
- RTE_CACHE_LINE_SIZE);
- if (flow_params == NULL) {
- rte_free(flow_id);
- printf("Memory allocation failed\n");
return;
- }
-
- for (i = 0; i < params->n_flows; i++) {
- uint32_t pos = i % N_FLOWS_BULK;
- uint32_t port_id = i % params->n_ports;
-
- flow_id[pos] = i;
-
- memset(&flow_params[pos], 0, sizeof(flow_params[pos]));
- flow_params[pos].port_id = port_id;
+ } /* action flow port */
+
+ /* action flow stats */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "stats") == 0)) {
+ struct pipeline_fa_policer_stats stats;
+ uint32_t flow_id, policer_id;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow stats");
+ return;
+ }
- if ((pos == N_FLOWS_BULK - 1) ||
- (i == params->n_flows - 1)) {
- int status;
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
- status = app_pipeline_fa_flow_config_bulk(app,
+ for (policer_id = 0;
+ policer_id < PIPELINE_FA_N_TC_MAX;
+ policer_id++) {
+ status = app_pipeline_fa_flow_policer_stats_read(app,
params->pipeline_id,
flow_id,
- pos + 1,
- 0,
- 0,
+ policer_id,
1,
- flow_params);
-
+ &stats);
if (status != 0) {
- printf("Command failed\n");
-
- break;
+ printf(CMD_MSG_FAIL, "action flow stats");
+ return;
}
- }
- }
-
- rte_free(flow_params);
- rte_free(flow_id);
-
-}
-
-cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_flows_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- flows_string, "flows");
-
-cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- n_flows, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_ports_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- ports_string, "ports");
-
-cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- n_ports, UINT32);
-
-cmdline_parse_inst_t cmd_fa_output_port_config_bulk = {
- .f = cmd_fa_output_port_config_bulk_parsed,
- .data = NULL,
- .help_str = "Flow output port configuration (multiple flows)",
- .tokens = {
- (void *) &cmd_fa_output_port_config_bulk_p_string,
- (void *) &cmd_fa_output_port_config_bulk_pipeline_id,
- (void *) &cmd_fa_output_port_config_bulk_flows_string,
- (void *) &cmd_fa_output_port_config_bulk_n_flows,
- (void *) &cmd_fa_output_port_config_bulk_ports_string,
- (void *) &cmd_fa_output_port_config_bulk_n_ports,
- NULL,
- },
-};
-
-/*
- * Flow DiffServ Code Point (DSCP) translation table configuration
- *
- * p <pipeline ID> dscp <DSCP ID> class <traffic class ID> color <color>
- *
- * <color> = G (green) | Y (yellow) | R (red)
-*/
-struct cmd_fa_dscp_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t dscp_string;
- uint32_t dscp_id;
- cmdline_fixed_string_t class_string;
- uint32_t traffic_class_id;
- cmdline_fixed_string_t color_string;
- cmdline_fixed_string_t color;
-
-};
-
-static void
-cmd_fa_dscp_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_dscp_config_result *params = parsed_result;
- struct app_params *app = data;
- enum rte_meter_color color;
- int status;
+ /* Display stats */
+ printf("\tPolicer: %" PRIu32
+ "\tPkts G: %" PRIu64
+ "\tPkts Y: %" PRIu64
+ "\tPkts R: %" PRIu64
+ "\tPkts D: %" PRIu64 "\n",
+ policer_id,
+ stats.n_pkts[e_RTE_METER_GREEN],
+ stats.n_pkts[e_RTE_METER_YELLOW],
+ stats.n_pkts[e_RTE_METER_RED],
+ stats.n_pkts_drop);
+ }
- status = string_to_color(params->color, &color);
- if (status) {
- printf("Invalid color\n");
return;
- }
-
- status = app_pipeline_fa_dscp_config(app,
- params->pipeline_id,
- params->dscp_id,
- params->traffic_class_id,
- color);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_dscp_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_dscp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- dscp_string, "dscp");
-
-cmdline_parse_token_num_t cmd_fa_dscp_config_dscp_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
- dscp_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_class_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- class_string, "class");
-
-cmdline_parse_token_num_t cmd_fa_dscp_config_traffic_class_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
- traffic_class_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_color_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- color_string, "color");
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_color =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- color, "G#Y#R");
+ } /* action flow stats */
+
+ /* action flow bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_fa_flow_params *flow_params;
+ uint32_t *flow_ids, n_flows, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow bulk");
+ return;
+ }
-cmdline_parse_inst_t cmd_fa_dscp_config = {
- .f = cmd_fa_dscp_config_parsed,
- .data = NULL,
- .help_str = "Flow DSCP translation table configuration",
- .tokens = {
- (void *) &cmd_fa_dscp_config_p_string,
- (void *) &cmd_fa_dscp_config_pipeline_id,
- (void *) &cmd_fa_dscp_config_dscp_string,
- (void *) &cmd_fa_dscp_config_dscp_id,
- (void *) &cmd_fa_dscp_config_class_string,
- (void *) &cmd_fa_dscp_config_traffic_class_id,
- (void *) &cmd_fa_dscp_config_color_string,
- (void *) &cmd_fa_dscp_config_color,
- NULL,
- },
-};
+ filename = tokens[2];
-/*
- * Flow policer stats read
- *
- * p <pipeline ID> flow <flow ID> policer <policer ID> stats
- */
+ n_flows = APP_PIPELINE_FA_MAX_RECORDS_IN_FILE;
+ flow_ids = malloc(n_flows * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
-struct cmd_fa_policer_stats_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t policer_string;
- uint32_t policer_id;
- cmdline_fixed_string_t stats_string;
-};
+ flow_params = malloc(n_flows * sizeof(struct pipeline_fa_flow_params));
+ if (flow_params == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(flow_ids);
+ return;
+ }
-static void
-cmd_fa_policer_stats_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_policer_stats_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_policer_stats stats;
- int status;
+ status = app_pipeline_fa_load_file(filename,
+ flow_ids,
+ flow_params,
+ &n_flows,
+ &line);
+ if (status) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_params);
+ free(flow_ids);
+ return;
+ }
- status = app_pipeline_fa_flow_policer_stats_read(app,
- params->pipeline_id,
- params->flow_id,
- params->policer_id,
- 1,
- &stats);
- if (status != 0) {
- printf("Command failed\n");
+ status = app_pipeline_fa_flow_config_bulk(app,
+ params->pipeline_id,
+ flow_ids,
+ n_flows,
+ 0xF,
+ 0xF,
+ 1,
+ flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow bulk");
+
+ free(flow_params);
+ free(flow_ids);
return;
- }
-
- /* Display stats */
- printf("\tPkts G: %" PRIu64
- "\tPkts Y: %" PRIu64
- "\tPkts R: %" PRIu64
- "\tPkts D: %" PRIu64 "\n",
- stats.n_pkts[e_RTE_METER_GREEN],
- stats.n_pkts[e_RTE_METER_YELLOW],
- stats.n_pkts[e_RTE_METER_RED],
- stats.n_pkts_drop);
-}
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_policer_stats_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- flow_string, "flow");
-
-cmdline_parse_token_num_t cmd_fa_policer_stats_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
- flow_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_policer_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- policer_string, "policer");
-
-cmdline_parse_token_num_t cmd_fa_policer_stats_policer_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
- policer_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- stats_string, "stats");
-
-cmdline_parse_inst_t cmd_fa_policer_stats = {
- .f = cmd_fa_policer_stats_parsed,
- .data = NULL,
- .help_str = "Flow policer stats read",
- .tokens = {
- (void *) &cmd_fa_policer_stats_p_string,
- (void *) &cmd_fa_policer_stats_pipeline_id,
- (void *) &cmd_fa_policer_stats_flow_string,
- (void *) &cmd_fa_policer_stats_flow_id,
- (void *) &cmd_fa_policer_stats_policer_string,
- (void *) &cmd_fa_policer_stats_policer_id,
- (void *) &cmd_fa_policer_stats_string,
- NULL,
- },
-};
-
-/*
- * Flow list
- *
- * p <pipeline ID> flow ls
- */
+ } /* action flow bulk */
+
+ /* action flow ls */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ (strcmp(tokens[1], "ls") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow ls");
+ return;
+ }
-struct cmd_fa_flow_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t actions_string;
- cmdline_fixed_string_t ls_string;
-};
+ status = app_pipeline_fa_flow_ls(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow ls");
-static void
-cmd_fa_flow_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_flow_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_fa_flow_ls(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
-}
+ return;
+ } /* action flow ls */
+
+ /* action dscp */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "dscp") == 0) &&
+ strcmp(tokens[1], "ls")) {
+ uint32_t dscp_id, tc_id;
+ enum rte_meter_color color;
+
+ if (n_tokens != 6) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action dscp");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- p_string, "p");
+ if (parser_read_uint32(&dscp_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "dscpid");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_flow_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_flow_ls_result,
- pipeline_id, UINT32);
+ if (strcmp(tokens[2], "class")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "class");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- flow_string, "flow");
+ if (parser_read_uint32(&tc_id, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "classid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_actions_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- actions_string, "actions");
+ if (strcmp(tokens[4], "color")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "color");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- ls_string, "ls");
+ if (string_to_color(tokens[5], &color)) {
+ printf(CMD_MSG_INVALID_ARG, "colorid");
+ return;
+ }
-cmdline_parse_inst_t cmd_fa_flow_ls = {
- .f = cmd_fa_flow_ls_parsed,
- .data = NULL,
- .help_str = "Flow actions list",
- .tokens = {
- (void *) &cmd_fa_flow_ls_p_string,
- (void *) &cmd_fa_flow_ls_pipeline_id,
- (void *) &cmd_fa_flow_ls_flow_string,
- (void *) &cmd_fa_flow_ls_actions_string,
- (void *) &cmd_fa_flow_ls_ls_string,
- NULL,
- },
-};
+ status = app_pipeline_fa_dscp_config(app,
+ params->pipeline_id,
+ dscp_id,
+ tc_id,
+ color);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "action dscp");
-/*
- * Flow DiffServ Code Point (DSCP) translation table list
- *
- * p <pipeline ID> dscp ls
- */
+ return;
+ } /* action dscp */
+
+ /* action dscp ls */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "dscp") == 0) &&
+ (strcmp(tokens[1], "ls") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action dscp ls");
+ return;
+ }
-struct cmd_fa_dscp_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t dscp_string;
- cmdline_fixed_string_t ls_string;
-};
+ status = app_pipeline_fa_dscp_ls(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "action dscp ls");
-static void
-cmd_fa_dscp_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_dscp_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ return;
+ } /* action dscp ls */
- status = app_pipeline_fa_dscp_ls(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
+ printf(CMD_MSG_FAIL, "action");
}
-cmdline_parse_token_string_t cmd_fa_dscp_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result,
- p_string, "p");
+static cmdline_parse_token_string_t cmd_action_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, p_string, "p");
-cmdline_parse_token_num_t cmd_fa_dscp_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_ls_result,
- pipeline_id, UINT32);
+static cmdline_parse_token_num_t cmd_action_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_action_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_fa_dscp_ls_dscp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result,
- dscp_string, "dscp");
+static cmdline_parse_token_string_t cmd_action_action_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, action_string, "action");
-cmdline_parse_token_string_t cmd_fa_dscp_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_action_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_fa_dscp_ls = {
- .f = cmd_fa_dscp_ls_parsed,
+cmdline_parse_inst_t cmd_action = {
+ .f = cmd_action_parsed,
.data = NULL,
- .help_str = "Flow DSCP translaton table list",
+ .help_str = "flow actions (meter, policer, policer stats, dscp table)",
.tokens = {
- (void *) &cmd_fa_dscp_ls_p_string,
- (void *) &cmd_fa_dscp_ls_pipeline_id,
- (void *) &cmd_fa_dscp_ls_dscp_string,
- (void *) &cmd_fa_dscp_ls_string,
+ (void *) &cmd_action_p_string,
+ (void *) &cmd_action_pipeline_id,
+ (void *) &cmd_action_action_string,
+ (void *) &cmd_action_multi_string,
NULL,
},
};
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_fa_meter_config,
- (cmdline_parse_inst_t *) &cmd_fa_meter_config_bulk,
- (cmdline_parse_inst_t *) &cmd_fa_policer_config,
- (cmdline_parse_inst_t *) &cmd_fa_policer_config_bulk,
- (cmdline_parse_inst_t *) &cmd_fa_output_port_config,
- (cmdline_parse_inst_t *) &cmd_fa_output_port_config_bulk,
- (cmdline_parse_inst_t *) &cmd_fa_dscp_config,
- (cmdline_parse_inst_t *) &cmd_fa_policer_stats,
- (cmdline_parse_inst_t *) &cmd_fa_flow_ls,
- (cmdline_parse_inst_t *) &cmd_fa_dscp_ls,
+ (cmdline_parse_inst_t *) &cmd_action,
NULL,
};
static struct pipeline_fe_ops pipeline_flow_actions_fe_ops = {
.f_init = app_pipeline_fa_init,
+ .f_post_init = NULL,
.f_free = app_pipeline_fa_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
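The single "action" command registered above replaces the per-action cmdline instances removed in this hunk. Judging from the token checks in cmd_action_parsed() (the accepted policer action letters and DSCP colors are assumed to follow string_to_policer_action() and string_to_color(), i.e. G/Y/R/D and G/Y/R), the consolidated syntax is:

    p <pipelineid> action flow <flowid> meter <meterid> trtcm <cir> <pir> <cbs> <pbs>
    p <pipelineid> action flow <flowid> policer <policerid> g <action> y <action> r <action>
    p <pipelineid> action flow <flowid> port <portid>
    p <pipelineid> action flow <flowid> stats
    p <pipelineid> action flow bulk <file>
    p <pipelineid> action flow ls
    p <pipelineid> action dscp <dscpid> class <classid> color <color>
    p <pipelineid> action dscp ls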
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.h b/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
index f2cd0cbb..9c609741 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
@@ -73,6 +73,17 @@ app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
int clear,
struct pipeline_fa_policer_stats *stats);
+#ifndef APP_PIPELINE_FA_MAX_RECORDS_IN_FILE
+#define APP_PIPELINE_FA_MAX_RECORDS_IN_FILE 65536
+#endif
+
+int
+app_pipeline_fa_load_file(char *filename,
+ uint32_t *flow_ids,
+ struct pipeline_fa_flow_params *p,
+ uint32_t *n_flows,
+ uint32_t *line);
+
extern struct pipeline_type pipeline_flow_actions;
#endif
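A minimal usage sketch for the bulk-load helper declared above (the file name and variable names are illustrative; this mirrors the "action flow bulk" handler earlier in the patch):

    uint32_t n_flows = APP_PIPELINE_FA_MAX_RECORDS_IN_FILE, line;
    uint32_t *flow_ids = malloc(n_flows * sizeof(uint32_t));
    struct pipeline_fa_flow_params *flow_params =
        malloc(n_flows * sizeof(struct pipeline_fa_flow_params));

    if ((flow_ids != NULL) && (flow_params != NULL) &&
        (app_pipeline_fa_load_file("actions.txt", flow_ids, flow_params,
            &n_flows, &line) == 0)) {
        /* On success, n_flows holds the number of records read;
         * on failure, line reports the offending line in the file. */
    }

    free(flow_params);
    free(flow_ids);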
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
index 3ad3ee63..11fcbb76 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
@@ -760,27 +760,6 @@ pipeline_fa_free(void *pipeline)
}
static int
-pipeline_fa_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_fa_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -1007,5 +986,4 @@ struct pipeline_be_ops pipeline_flow_actions_be_ops = {
.f_free = pipeline_fa_free,
.f_run = NULL,
.f_timer = pipeline_fa_timer,
- .f_track = pipeline_fa_track,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
index 19215748..9ef50cc9 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <string.h>
#include <sys/queue.h>
#include <netinet/in.h>
+#include <unistd.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -43,13 +44,12 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_flow_classification.h"
#include "hash_func.h"
+#include "parser.h"
/*
* Key conversion
@@ -96,9 +96,9 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
struct pkt_key_qinq *qinq = key_buffer;
qinq->ethertype_svlan = 0;
- qinq->svlan = rte_bswap16(key_in->key.qinq.svlan);
+ qinq->svlan = rte_cpu_to_be_16(key_in->key.qinq.svlan);
qinq->ethertype_cvlan = 0;
- qinq->cvlan = rte_bswap16(key_in->key.qinq.cvlan);
+ qinq->cvlan = rte_cpu_to_be_16(key_in->key.qinq.cvlan);
if (signature)
*signature = (uint32_t) hash_default_key8(qinq, 8, 0);
@@ -112,10 +112,10 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv4->ttl = 0;
ipv4->proto = key_in->key.ipv4_5tuple.proto;
ipv4->checksum = 0;
- ipv4->ip_src = rte_bswap32(key_in->key.ipv4_5tuple.ip_src);
- ipv4->ip_dst = rte_bswap32(key_in->key.ipv4_5tuple.ip_dst);
- ipv4->port_src = rte_bswap16(key_in->key.ipv4_5tuple.port_src);
- ipv4->port_dst = rte_bswap16(key_in->key.ipv4_5tuple.port_dst);
+ ipv4->ip_src = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_src);
+ ipv4->ip_dst = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_dst);
+ ipv4->port_src = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_src);
+ ipv4->port_dst = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_dst);
if (signature)
*signature = (uint32_t) hash_default_key16(ipv4, 16, 0);
@@ -132,8 +132,8 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv6->hop_limit = 0;
memcpy(&ipv6->ip_src, &key_in->key.ipv6_5tuple.ip_src, 16);
memcpy(&ipv6->ip_dst, &key_in->key.ipv6_5tuple.ip_dst, 16);
- ipv6->port_src = rte_bswap16(key_in->key.ipv6_5tuple.port_src);
- ipv6->port_dst = rte_bswap16(key_in->key.ipv6_5tuple.port_dst);
+ ipv6->port_src = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_src);
+ ipv6->port_dst = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_dst);
if (signature)
*signature = (uint32_t) hash_default_key64(ipv6, 64, 0);
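The rte_bswap16()/rte_bswap32() calls above are replaced with rte_cpu_to_be_16()/rte_cpu_to_be_32() so that the packed lookup keys end up in network byte order on big-endian CPUs as well: the bswap macros swap unconditionally, while the cpu_to_be conversions swap only on little-endian hosts. A minimal sketch of the distinction (the helper name is illustrative, not part of the patch):

    #include <rte_byteorder.h>

    /* Pack a host-order S-VLAN tag into the big-endian field expected
     * by the QinQ lookup key. */
    static inline uint16_t
    pack_svlan(uint16_t svlan)
    {
        /* rte_bswap16() always swaps, which is only correct on
         * little-endian CPUs; rte_cpu_to_be_16() is a no-op on
         * big-endian hosts, so the result is big-endian either way. */
        return rte_cpu_to_be_16(svlan);
    }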
@@ -278,6 +278,283 @@ app_pipeline_fc_key_check(struct pipeline_fc_key *key)
}
int
+app_pipeline_fc_load_file_qinq(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ uint16_t svlan, cvlan;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 7) ||
+ strcmp(tokens[0], "qinq") ||
+ parser_read_uint16(&svlan, tokens[1]) ||
+ parser_read_uint16(&cvlan, tokens[2]) ||
+ strcmp(tokens[3], "port") ||
+ parser_read_uint32(&portid, tokens[4]) ||
+ strcmp(tokens[5], "id") ||
+ parser_read_uint32(&flowid, tokens[6]))
+ goto error1;
+
+ keys[i].type = FLOW_KEY_QINQ;
+ keys[i].key.qinq.svlan = svlan;
+ keys[i].key.qinq.cvlan = cvlan;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error1;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error1:
+ *line = l;
+ fclose(f);
+ return -1;
+}
+
+int
+app_pipeline_fc_load_file_ipv4(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ struct in_addr sipaddr, dipaddr;
+ uint16_t sport, dport;
+ uint8_t proto;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error2;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 10) ||
+ strcmp(tokens[0], "ipv4") ||
+ parse_ipv4_addr(tokens[1], &sipaddr) ||
+ parse_ipv4_addr(tokens[2], &dipaddr) ||
+ parser_read_uint16(&sport, tokens[3]) ||
+ parser_read_uint16(&dport, tokens[4]) ||
+ parser_read_uint8(&proto, tokens[5]) ||
+ strcmp(tokens[6], "port") ||
+ parser_read_uint32(&portid, tokens[7]) ||
+ strcmp(tokens[8], "id") ||
+ parser_read_uint32(&flowid, tokens[9]))
+ goto error2;
+
+ keys[i].type = FLOW_KEY_IPV4_5TUPLE;
+ keys[i].key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.port_src = sport;
+ keys[i].key.ipv4_5tuple.port_dst = dport;
+ keys[i].key.ipv4_5tuple.proto = proto;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error2;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error2:
+ *line = l;
+ fclose(f);
+ return -1;
+}
+
+int
+app_pipeline_fc_load_file_ipv6(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ struct in6_addr sipaddr, dipaddr;
+ uint16_t sport, dport;
+ uint8_t proto;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error3;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 10) ||
+ strcmp(tokens[0], "ipv6") ||
+ parse_ipv6_addr(tokens[1], &sipaddr) ||
+ parse_ipv6_addr(tokens[2], &dipaddr) ||
+ parser_read_uint16(&sport, tokens[3]) ||
+ parser_read_uint16(&dport, tokens[4]) ||
+ parser_read_uint8(&proto, tokens[5]) ||
+ strcmp(tokens[6], "port") ||
+ parser_read_uint32(&portid, tokens[7]) ||
+ strcmp(tokens[8], "id") ||
+ parser_read_uint32(&flowid, tokens[9]))
+ goto error3;
+
+ keys[i].type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(keys[i].key.ipv6_5tuple.ip_src,
+ sipaddr.s6_addr,
+ sizeof(sipaddr.s6_addr));
+ memcpy(keys[i].key.ipv6_5tuple.ip_dst,
+ dipaddr.s6_addr,
+ sizeof(dipaddr.s6_addr));
+ keys[i].key.ipv6_5tuple.port_src = sport;
+ keys[i].key.ipv6_5tuple.port_dst = dport;
+ keys[i].key.ipv6_5tuple.proto = proto;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error3;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error3:
+ *line = l;
+ fclose(f);
+ return -1;
+}
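The three loaders above share one text format, inferred from their token checks: blank lines and lines starting with '#' are skipped, and each record names its key type followed by "port <portid> id <flowid>". Illustrative records (one key type per file, since each loader rejects other keywords):

    # qinq <svlan> <cvlan> port <portid> id <flowid>
    qinq 64 512 port 0 id 0

    # ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
    ipv4 100.0.0.10 200.0.0.10 100 200 6 port 1 id 1

    # ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
    ipv6 2001:db8::1 2001:db8::2 100 200 6 port 2 id 2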
+
+
+
+int
app_pipeline_fc_add(struct app_params *app,
uint32_t pipeline_id,
struct pipeline_fc_key *key,
@@ -896,1315 +1173,728 @@ app_pipeline_fc_ls(struct app_params *app,
return 0;
}
-
/*
- * flow add qinq
- */
-
-struct cmd_fc_add_qinq_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t qinq_string;
- uint16_t svlan;
- uint16_t cvlan;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t flowid_string;
- uint32_t flow_id;
-};
-
-static void
-cmd_fc_add_qinq_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_qinq_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_QINQ;
- key.key.qinq.svlan = params->svlan;
- key.key.qinq.cvlan = params->cvlan;
-
- status = app_pipeline_fc_add(app,
- params->pipeline_id,
- &key,
- params->port,
- params->flow_id);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, add_string,
- "add");
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, qinq_string,
- "qinq");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, svlan, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, cvlan, UINT16);
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, port_string,
- "port");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, port, UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_flowid_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flowid_string,
- "flowid");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, flow_id, UINT32);
-
-cmdline_parse_inst_t cmd_fc_add_qinq = {
- .f = cmd_fc_add_qinq_parsed,
- .data = NULL,
- .help_str = "Flow add (Q-in-Q)",
- .tokens = {
- (void *) &cmd_fc_add_qinq_p_string,
- (void *) &cmd_fc_add_qinq_pipeline_id,
- (void *) &cmd_fc_add_qinq_flow_string,
- (void *) &cmd_fc_add_qinq_add_string,
- (void *) &cmd_fc_add_qinq_qinq_string,
- (void *) &cmd_fc_add_qinq_svlan,
- (void *) &cmd_fc_add_qinq_cvlan,
- (void *) &cmd_fc_add_qinq_port_string,
- (void *) &cmd_fc_add_qinq_port,
- (void *) &cmd_fc_add_qinq_flowid_string,
- (void *) &cmd_fc_add_qinq_flow_id,
- NULL,
- },
-};
-
-/*
- * flow add qinq all
+ * flow
+ *
+ * flow add:
+ * p <pipelineid> flow add qinq <svlan> <cvlan> port <portid> id <flowid>
+ * p <pipelineid> flow add qinq bulk <file>
+ * p <pipelineid> flow add ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
+ * p <pipelineid> flow add ipv4 bulk <file>
+ * p <pipelineid> flow add ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
+ * p <pipelineid> flow add ipv6 bulk <file>
+ *
+ * flow add default:
+ * p <pipelineid> flow add default <portid>
+ *
+ * flow del:
+ * p <pipelineid> flow del qinq <svlan> <cvlan>
+ * p <pipelineid> flow del ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto>
+ * p <pipelineid> flow del ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto>
+ *
+ * flow del default:
+ * p <pipelineid> flow del default
+ *
+ * flow ls:
+ * p <pipelineid> flow ls
*/
-struct cmd_fc_add_qinq_all_result {
+struct cmd_flow_result {
cmdline_fixed_string_t p_string;
uint32_t pipeline_id;
cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t qinq_string;
- cmdline_fixed_string_t all_string;
- uint32_t n_flows;
- uint32_t n_ports;
+ cmdline_multi_string_t multi_string;
};
-#ifndef N_FLOWS_BULK
-#define N_FLOWS_BULK 4096
-#endif
-
static void
-cmd_fc_add_qinq_all_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
+cmd_flow_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
void *data)
{
- struct cmd_fc_add_qinq_all_result *params = parsed_result;
+ struct cmd_flow_result *results = parsed_result;
struct app_params *app = data;
- struct pipeline_fc_key *key;
- uint32_t *port_id;
- uint32_t *flow_id;
- uint32_t id;
-
- /* Check input arguments */
- if (params->n_flows == 0) {
- printf("Invalid number of flows\n");
- return;
- }
- if (params->n_ports == 0) {
- printf("Invalid number of output ports\n");
- return;
- }
-
- /* Memory allocation */
- key = rte_zmalloc(NULL,
- N_FLOWS_BULK * sizeof(*key),
- RTE_CACHE_LINE_SIZE);
- if (key == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
-
- port_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*port_id),
- RTE_CACHE_LINE_SIZE);
- if (port_id == NULL) {
- rte_free(key);
- printf("Memory allocation failed\n");
- return;
- }
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
- flow_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*flow_id),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- rte_free(port_id);
- rte_free(key);
- printf("Memory allocation failed\n");
+ status = parse_tokenize_string(results->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "flow");
return;
}
- /* Flow add */
- for (id = 0; id < params->n_flows; id++) {
- uint32_t pos = id & (N_FLOWS_BULK - 1);
-
- key[pos].type = FLOW_KEY_QINQ;
- key[pos].key.qinq.svlan = id >> 12;
- key[pos].key.qinq.cvlan = id & 0xFFF;
-
- port_id[pos] = id % params->n_ports;
- flow_id[pos] = id;
-
- if ((pos == N_FLOWS_BULK - 1) ||
- (id == params->n_flows - 1)) {
- int status;
-
- status = app_pipeline_fc_add_bulk(app,
- params->pipeline_id,
- key,
- port_id,
- flow_id,
- pos + 1);
-
- if (status != 0) {
- printf("Command failed\n");
-
- break;
- }
+ /* flow add qinq */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ uint32_t svlan;
+ uint32_t cvlan;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 8) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq");
+ return;
}
- }
-
- /* Memory free */
- rte_free(flow_id);
- rte_free(port_id);
- rte_free(key);
-}
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, p_string,
- "p");
+ if (parser_read_uint32(&svlan, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_qinq_all_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, pipeline_id,
- UINT32);
+ if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, flow_string,
- "flow");
+ if (strcmp(tokens[4], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, add_string,
- "add");
+ if (parser_read_uint32(&port_id, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, qinq_string,
- "qinq");
+ if (strcmp(tokens[6], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_all_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, all_string,
- "all");
+ if (parser_read_uint32(&flow_id, tokens[7]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_qinq_all_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, n_flows,
- UINT32);
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = svlan;
+ key.key.qinq.cvlan = cvlan;
-cmdline_parse_token_num_t cmd_fc_add_qinq_all_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, n_ports,
- UINT32);
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add qinq");
-cmdline_parse_inst_t cmd_fc_add_qinq_all = {
- .f = cmd_fc_add_qinq_all_parsed,
- .data = NULL,
- .help_str = "Flow add all (Q-in-Q)",
- .tokens = {
- (void *) &cmd_fc_add_qinq_all_p_string,
- (void *) &cmd_fc_add_qinq_all_pipeline_id,
- (void *) &cmd_fc_add_qinq_all_flow_string,
- (void *) &cmd_fc_add_qinq_all_add_string,
- (void *) &cmd_fc_add_qinq_all_qinq_string,
- (void *) &cmd_fc_add_qinq_all_all_string,
- (void *) &cmd_fc_add_qinq_all_n_flows,
- (void *) &cmd_fc_add_qinq_all_n_ports,
- NULL,
- },
-};
+ return;
+ } /* flow add qinq */
+
+ /* flow add ipv4 */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ struct in_addr sipaddr;
+ struct in_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 11) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4");
+ return;
+ }
-/*
- * flow add ipv4_5tuple
- */
+ if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv4addr");
+ return;
+ }
+ if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv4addr");
+ return;
+ }
-struct cmd_fc_add_ipv4_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv4_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t flowid_string;
- uint32_t flow_id;
-};
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-static void
-cmd_fc_add_ipv4_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv4_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV4_5TUPLE;
- key.key.ipv4_5tuple.ip_src = rte_bswap32(
- params->ip_src.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.ip_dst = rte_bswap32(
- params->ip_dst.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.port_src = params->port_src;
- key.key.ipv4_5tuple.port_dst = params->port_dst;
- key.key.ipv4_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_add(app,
- params->pipeline_id,
- &key,
- params->port,
- params->flow_id);
- if (status != 0)
- printf("Command failed\n");
-}
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, p_string,
- "p");
+ if (strcmp(tokens[7], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, pipeline_id,
- UINT32);
+ if (parser_read_uint32(&port_id, tokens[8]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- flow_string, "flow");
+ if (strcmp(tokens[9], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- add_string, "add");
+ if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_ipv4_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- ipv4_5tuple_string, "ipv4_5tuple");
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.port_src = sport;
+ key.key.ipv4_5tuple.port_dst = dport;
+ key.key.ipv4_5tuple.proto = proto;
+
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv4");
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv4_5tuple_ip_src =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, ip_src);
+ return;
+ } /* flow add ipv4 */
+
+ /* flow add ipv6 */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ struct in6_addr sipaddr;
+ struct in6_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 11) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6");
+ return;
+ }
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv4_5tuple_ip_dst =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, ip_dst);
+ if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv6addr");
+ return;
+ }
+ if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv6addr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_src,
- UINT16);
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_dst,
- UINT16);
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, proto,
- UINT32);
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_string,
- "port");
+ if (strcmp(tokens[7], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port,
- UINT32);
+ if (parser_read_uint32(&port_id, tokens[8]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flowid_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- flowid_string, "flowid");
+ if (strcmp(tokens[9], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, flow_id,
- UINT32);
+ if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
- .f = cmd_fc_add_ipv4_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow add (IPv4 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv4_5tuple_p_string,
- (void *) &cmd_fc_add_ipv4_5tuple_pipeline_id,
- (void *) &cmd_fc_add_ipv4_5tuple_flow_string,
- (void *) &cmd_fc_add_ipv4_5tuple_add_string,
- (void *) &cmd_fc_add_ipv4_5tuple_ipv4_5tuple_string,
- (void *) &cmd_fc_add_ipv4_5tuple_ip_src,
- (void *) &cmd_fc_add_ipv4_5tuple_ip_dst,
- (void *) &cmd_fc_add_ipv4_5tuple_port_src,
- (void *) &cmd_fc_add_ipv4_5tuple_port_dst,
- (void *) &cmd_fc_add_ipv4_5tuple_proto,
- (void *) &cmd_fc_add_ipv4_5tuple_port_string,
- (void *) &cmd_fc_add_ipv4_5tuple_port,
- (void *) &cmd_fc_add_ipv4_5tuple_flowid_string,
- (void *) &cmd_fc_add_ipv4_5tuple_flow_id,
- NULL,
- },
-};
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src, (void *)&sipaddr, 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst, (void *)&dipaddr, 16);
+ key.key.ipv6_5tuple.port_src = sport;
+ key.key.ipv6_5tuple.port_dst = dport;
+ key.key.ipv6_5tuple.proto = proto;
+
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv6");
-/*
- * flow add ipv4_5tuple all
- */
+ return;
+ } /* flow add ipv6 */
+
+ /* flow add qinq bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq bulk");
+ return;
+ }
-struct cmd_fc_add_ipv4_5tuple_all_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv4_5tuple_string;
- cmdline_fixed_string_t all_string;
- uint32_t n_flows;
- uint32_t n_ports;
-};
+ filename = tokens[3];
-static void
-cmd_fc_add_ipv4_5tuple_all_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv4_5tuple_all_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key *key;
- uint32_t *port_id;
- uint32_t *flow_id;
- uint32_t id;
-
- /* Check input parameters */
- if (params->n_flows == 0) {
- printf("Invalid number of flows\n");
- return;
- }
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
- if (params->n_ports == 0) {
- printf("Invalid number of ports\n");
- return;
- }
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
- /* Memory allocation */
- key = rte_zmalloc(NULL,
- N_FLOWS_BULK * sizeof(*key),
- RTE_CACHE_LINE_SIZE);
- if (key == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
- port_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*port_id),
- RTE_CACHE_LINE_SIZE);
- if (port_id == NULL) {
- rte_free(key);
- printf("Memory allocation failed\n");
- return;
- }
+ status = app_pipeline_fc_load_file_qinq(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
- flow_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*flow_id),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- rte_free(port_id);
- rte_free(key);
- printf("Memory allocation failed\n");
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add qinq bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
return;
- }
-
- /* Flow add */
- for (id = 0; id < params->n_flows; id++) {
- uint32_t pos = id & (N_FLOWS_BULK - 1);
+ } /* flow add qinq bulk */
+
+ /* flow add ipv4 bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4 bulk");
+ return;
+ }
- key[pos].type = FLOW_KEY_IPV4_5TUPLE;
- key[pos].key.ipv4_5tuple.ip_src = 0;
- key[pos].key.ipv4_5tuple.ip_dst = id;
- key[pos].key.ipv4_5tuple.port_src = 0;
- key[pos].key.ipv4_5tuple.port_dst = 0;
- key[pos].key.ipv4_5tuple.proto = 6;
+ filename = tokens[3];
- port_id[pos] = id % params->n_ports;
- flow_id[pos] = id;
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
- if ((pos == N_FLOWS_BULK - 1) ||
- (id == params->n_flows - 1)) {
- int status;
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
- status = app_pipeline_fc_add_bulk(app,
- params->pipeline_id,
- key,
- port_id,
- flow_id,
- pos + 1);
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
- if (status != 0) {
- printf("Command failed\n");
+ status = app_pipeline_fc_load_file_ipv4(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
- break;
- }
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv4 bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ } /* flow add ipv4 bulk */
+
+ /* flow add ipv6 bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6 bulk");
+ return;
}
- }
- /* Memory free */
- rte_free(flow_id);
- rte_free(port_id);
- rte_free(key);
-}
+ filename = tokens[3];
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- p_string, "p");
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- pipeline_id, UINT32);
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- flow_string, "flow");
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- add_string, "add");
+ status = app_pipeline_fc_load_file_ipv6(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_ipv4_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- ipv4_5tuple_string, "ipv4_5tuple");
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv6 bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ } /* flow add ipv6 bulk */
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_all_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- all_string, "all");
+ /* flow add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- n_flows, UINT32);
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add default");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- n_ports, UINT32);
+ if (parser_read_uint32(&port_id, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple_all = {
- .f = cmd_fc_add_ipv4_5tuple_all_parsed,
- .data = NULL,
- .help_str = "Flow add all (IPv4 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv4_5tuple_all_p_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_pipeline_id,
- (void *) &cmd_fc_add_ipv4_5tuple_all_flow_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_add_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_ipv4_5tuple_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_all_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_n_flows,
- (void *) &cmd_fc_add_ipv4_5tuple_all_n_ports,
- NULL,
- },
-};
+ status = app_pipeline_fc_add_default(app,
+ results->pipeline_id,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add default");
-/*
- * flow add ipv6_5tuple
- */
+ return;
+ } /* flow add default */
-struct cmd_fc_add_ipv6_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv6_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t flowid_string;
- uint32_t flow_id;
-};
+ /* flow del qinq */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0)) {
+ struct pipeline_fc_key key;
+ uint32_t svlan;
+ uint32_t cvlan;
-static void
-cmd_fc_add_ipv6_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv6_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
+ memset(&key, 0, sizeof(key));
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV6_5TUPLE;
- memcpy(key.key.ipv6_5tuple.ip_src,
- params->ip_src.addr.ipv6.s6_addr,
- 16);
- memcpy(key.key.ipv6_5tuple.ip_dst,
- params->ip_dst.addr.ipv6.s6_addr,
- 16);
- key.key.ipv6_5tuple.port_src = params->port_src;
- key.key.ipv6_5tuple.port_dst = params->port_dst;
- key.key.ipv6_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_add(app,
- params->pipeline_id,
- &key,
- params->port,
- params->flow_id);
- if (status != 0)
- printf("Command failed\n");
-}
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del qinq");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- p_string, "p");
+ if (parser_read_uint32(&svlan, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, pipeline_id,
- UINT32);
+ if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- flow_string, "flow");
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = svlan;
+ key.key.qinq.cvlan = cvlan;
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- add_string, "add");
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del qinq");
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_ipv6_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- ipv6_5tuple_string, "ipv6_5tuple");
+ return;
+ } /* flow del qinq */
+
+ /* flow del ipv4 */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0)) {
+ struct pipeline_fc_key key;
+ struct in_addr sipaddr;
+ struct in_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 7) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv4");
+ return;
+ }
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv6_5tuple_ip_src =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, ip_src);
+ if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv4addr");
+ return;
+ }
+ if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv4addr");
+ return;
+ }
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv6_5tuple_ip_dst =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, ip_dst);
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port_src,
- UINT16);
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port_dst,
- UINT16);
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, proto,
- UINT32);
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.port_src = sport;
+ key.key.ipv4_5tuple.port_dst = dport;
+ key.key.ipv4_5tuple.proto = proto;
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- port_string, "port");
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del ipv4");
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port,
- UINT32);
+ return;
+ } /* flow del ipv4 */
+
+ /* flow del ipv6 */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0)) {
+ struct pipeline_fc_key key;
+ struct in6_addr sipaddr;
+ struct in6_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 7) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv6");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flowid_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- flowid_string, "flowid");
+ if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv6addr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, flow_id,
- UINT32);
+ if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv6addr");
+ return;
+ }
-cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
- .f = cmd_fc_add_ipv6_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow add (IPv6 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv6_5tuple_p_string,
- (void *) &cmd_fc_add_ipv6_5tuple_pipeline_id,
- (void *) &cmd_fc_add_ipv6_5tuple_flow_string,
- (void *) &cmd_fc_add_ipv6_5tuple_add_string,
- (void *) &cmd_fc_add_ipv6_5tuple_ipv6_5tuple_string,
- (void *) &cmd_fc_add_ipv6_5tuple_ip_src,
- (void *) &cmd_fc_add_ipv6_5tuple_ip_dst,
- (void *) &cmd_fc_add_ipv6_5tuple_port_src,
- (void *) &cmd_fc_add_ipv6_5tuple_port_dst,
- (void *) &cmd_fc_add_ipv6_5tuple_proto,
- (void *) &cmd_fc_add_ipv6_5tuple_port_string,
- (void *) &cmd_fc_add_ipv6_5tuple_port,
- (void *) &cmd_fc_add_ipv6_5tuple_flowid_string,
- (void *) &cmd_fc_add_ipv6_5tuple_flow_id,
- NULL,
- },
-};
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-/*
- * flow add ipv6_5tuple all
- */
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
-struct cmd_fc_add_ipv6_5tuple_all_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv6_5tuple_string;
- cmdline_fixed_string_t all_string;
- uint32_t n_flows;
- uint32_t n_ports;
-};
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-static void
-cmd_fc_add_ipv6_5tuple_all_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv6_5tuple_all_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key *key;
- uint32_t *port_id;
- uint32_t *flow_id;
- uint32_t id;
-
- /* Check input parameters */
- if (params->n_flows == 0) {
- printf("Invalid number of flows\n");
- return;
- }
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src, &sipaddr, 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst, &dipaddr, 16);
+ key.key.ipv6_5tuple.port_src = sport;
+ key.key.ipv6_5tuple.port_dst = dport;
+ key.key.ipv6_5tuple.proto = proto;
- if (params->n_ports == 0) {
- printf("Invalid number of ports\n");
- return;
- }
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del ipv6");
- /* Memory allocation */
- key = rte_zmalloc(NULL,
- N_FLOWS_BULK * sizeof(*key),
- RTE_CACHE_LINE_SIZE);
- if (key == NULL) {
- printf("Memory allocation failed\n");
return;
- }
+ } /* flow del ipv6 */
+
+ /* flow del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del default");
+ return;
+ }
- port_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*port_id),
- RTE_CACHE_LINE_SIZE);
- if (port_id == NULL) {
- rte_free(key);
- printf("Memory allocation failed\n");
- return;
- }
+ status = app_pipeline_fc_del_default(app,
+ results->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del default");
- flow_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*flow_id),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- rte_free(port_id);
- rte_free(key);
- printf("Memory allocation failed\n");
return;
- }
-
- /* Flow add */
- for (id = 0; id < params->n_flows; id++) {
- uint32_t pos = id & (N_FLOWS_BULK - 1);
- uint32_t *x;
-
- key[pos].type = FLOW_KEY_IPV6_5TUPLE;
- x = (uint32_t *) key[pos].key.ipv6_5tuple.ip_dst;
- *x = rte_bswap32(id);
- key[pos].key.ipv6_5tuple.proto = 6;
-
- port_id[pos] = id % params->n_ports;
- flow_id[pos] = id;
+ } /* flow del default */
- if ((pos == N_FLOWS_BULK - 1) ||
- (id == params->n_flows - 1)) {
- int status;
-
- status = app_pipeline_fc_add_bulk(app,
- params->pipeline_id,
- key,
- port_id,
- flow_id,
- pos + 1);
-
- if (status != 0) {
- printf("Command failed\n");
-
- break;
- }
+ /* flow ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow ls");
+ return;
}
- }
-
- /* Memory free */
- rte_free(flow_id);
- rte_free(port_id);
- rte_free(key);
-}
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- flow_string, "flow");
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- add_string, "add");
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_ipv6_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- ipv6_5tuple_string, "ipv6_5tuple");
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_all_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- all_string, "all");
-
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- n_flows, UINT32);
-
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- n_ports, UINT32);
-
-cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple_all = {
- .f = cmd_fc_add_ipv6_5tuple_all_parsed,
- .data = NULL,
- .help_str = "Flow add all (ipv6 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv6_5tuple_all_p_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_pipeline_id,
- (void *) &cmd_fc_add_ipv6_5tuple_all_flow_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_add_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_ipv6_5tuple_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_all_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_n_flows,
- (void *) &cmd_fc_add_ipv6_5tuple_all_n_ports,
- NULL,
- },
-};
-
-/*
- * flow del qinq
- */
-struct cmd_fc_del_qinq_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t qinq_string;
- uint16_t svlan;
- uint16_t cvlan;
-};
-
-static void
-cmd_fc_del_qinq_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_qinq_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_QINQ;
- key.key.qinq.svlan = params->svlan;
- key.key.qinq.cvlan = params->cvlan;
- status = app_pipeline_fc_del(app, params->pipeline_id, &key);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_del_qinq_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, del_string,
- "del");
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, qinq_string,
- "qinq");
-
-cmdline_parse_token_num_t cmd_fc_del_qinq_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, svlan, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_qinq_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, cvlan, UINT16);
-
-cmdline_parse_inst_t cmd_fc_del_qinq = {
- .f = cmd_fc_del_qinq_parsed,
- .data = NULL,
- .help_str = "Flow delete (Q-in-Q)",
- .tokens = {
- (void *) &cmd_fc_del_qinq_p_string,
- (void *) &cmd_fc_del_qinq_pipeline_id,
- (void *) &cmd_fc_del_qinq_flow_string,
- (void *) &cmd_fc_del_qinq_del_string,
- (void *) &cmd_fc_del_qinq_qinq_string,
- (void *) &cmd_fc_del_qinq_svlan,
- (void *) &cmd_fc_del_qinq_cvlan,
- NULL,
- },
-};
-
-/*
- * flow del ipv4_5tuple
- */
-
-struct cmd_fc_del_ipv4_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t ipv4_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
-};
-
-static void
-cmd_fc_del_ipv4_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_ipv4_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV4_5TUPLE;
- key.key.ipv4_5tuple.ip_src = rte_bswap32(
- params->ip_src.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.ip_dst = rte_bswap32(
- params->ip_dst.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.port_src = params->port_src;
- key.key.ipv4_5tuple.port_dst = params->port_dst;
- key.key.ipv4_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_del(app, params->pipeline_id, &key);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- p_string, "p");
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- pipeline_id, UINT32);
+ status = app_pipeline_fc_ls(app, results->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow ls");
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- flow_string, "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- del_string, "del");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_ipv4_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- ipv4_5tuple_string, "ipv4_5tuple");
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv4_5tuple_ip_src =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- ip_src);
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv4_5tuple_ip_dst =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result, ip_dst);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- port_src, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- port_dst, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- proto, UINT32);
-
-cmdline_parse_inst_t cmd_fc_del_ipv4_5tuple = {
- .f = cmd_fc_del_ipv4_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow delete (IPv4 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_del_ipv4_5tuple_p_string,
- (void *) &cmd_fc_del_ipv4_5tuple_pipeline_id,
- (void *) &cmd_fc_del_ipv4_5tuple_flow_string,
- (void *) &cmd_fc_del_ipv4_5tuple_del_string,
- (void *) &cmd_fc_del_ipv4_5tuple_ipv4_5tuple_string,
- (void *) &cmd_fc_del_ipv4_5tuple_ip_src,
- (void *) &cmd_fc_del_ipv4_5tuple_ip_dst,
- (void *) &cmd_fc_del_ipv4_5tuple_port_src,
- (void *) &cmd_fc_del_ipv4_5tuple_port_dst,
- (void *) &cmd_fc_del_ipv4_5tuple_proto,
- NULL,
- },
-};
-
-/*
- * flow del ipv6_5tuple
- */
-
-struct cmd_fc_del_ipv6_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t ipv6_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
-};
-
-static void
-cmd_fc_del_ipv6_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_ipv6_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV6_5TUPLE;
- memcpy(key.key.ipv6_5tuple.ip_src,
- params->ip_src.addr.ipv6.s6_addr,
- 16);
- memcpy(key.key.ipv6_5tuple.ip_dst,
- params->ip_dst.addr.ipv6.s6_addr,
- 16);
- key.key.ipv6_5tuple.port_src = params->port_src;
- key.key.ipv6_5tuple.port_dst = params->port_dst;
- key.key.ipv6_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_del(app, params->pipeline_id, &key);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- flow_string, "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- del_string, "del");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_ipv6_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- ipv6_5tuple_string, "ipv6_5tuple");
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv6_5tuple_ip_src =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, ip_src);
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv6_5tuple_ip_dst =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, ip_dst);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, port_src,
- UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, port_dst,
- UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, proto,
- UINT32);
-
-cmdline_parse_inst_t cmd_fc_del_ipv6_5tuple = {
- .f = cmd_fc_del_ipv6_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow delete (IPv6 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_del_ipv6_5tuple_p_string,
- (void *) &cmd_fc_del_ipv6_5tuple_pipeline_id,
- (void *) &cmd_fc_del_ipv6_5tuple_flow_string,
- (void *) &cmd_fc_del_ipv6_5tuple_del_string,
- (void *) &cmd_fc_del_ipv6_5tuple_ipv6_5tuple_string,
- (void *) &cmd_fc_del_ipv6_5tuple_ip_src,
- (void *) &cmd_fc_del_ipv6_5tuple_ip_dst,
- (void *) &cmd_fc_del_ipv6_5tuple_port_src,
- (void *) &cmd_fc_del_ipv6_5tuple_port_dst,
- (void *) &cmd_fc_del_ipv6_5tuple_proto,
- NULL,
- },
-};
-
-/*
- * flow add default
- */
-
-struct cmd_fc_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint32_t port;
-};
-
-static void
-cmd_fc_add_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_fc_add_default(app, params->pipeline_id,
- params->port);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_fc_add_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_default_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_default_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, add_string,
- "add");
-
-cmdline_parse_token_string_t cmd_fc_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result,
- default_string, "default");
-
-cmdline_parse_token_num_t cmd_fc_add_default_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_default_result, port, UINT32);
-
-cmdline_parse_inst_t cmd_fc_add_default = {
- .f = cmd_fc_add_default_parsed,
- .data = NULL,
- .help_str = "Flow add default",
- .tokens = {
- (void *) &cmd_fc_add_default_p_string,
- (void *) &cmd_fc_add_default_pipeline_id,
- (void *) &cmd_fc_add_default_flow_string,
- (void *) &cmd_fc_add_default_add_string,
- (void *) &cmd_fc_add_default_default_string,
- (void *) &cmd_fc_add_default_port,
- NULL,
- },
-};
-
-/*
- * flow del default
- */
-
-struct cmd_fc_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
-
-static void
-cmd_fc_del_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_fc_del_default(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_fc_del_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_default_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_del_default_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, del_string,
- "del");
-
-cmdline_parse_token_string_t cmd_fc_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result,
- default_string, "default");
-
-cmdline_parse_inst_t cmd_fc_del_default = {
- .f = cmd_fc_del_default_parsed,
- .data = NULL,
- .help_str = "Flow delete default",
- .tokens = {
- (void *) &cmd_fc_del_default_p_string,
- (void *) &cmd_fc_del_default_pipeline_id,
- (void *) &cmd_fc_del_default_flow_string,
- (void *) &cmd_fc_del_default_del_string,
- (void *) &cmd_fc_del_default_default_string,
- NULL,
- },
-};
-
-/*
- * flow ls
- */
-
-struct cmd_fc_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_fc_ls_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ return;
+ } /* flow ls */
- status = app_pipeline_fc_ls(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
+ printf(CMD_MSG_MISMATCH_ARGS, "flow");
}
-cmdline_parse_token_string_t cmd_fc_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result, p_string, "p");
+static cmdline_parse_token_string_t cmd_flow_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, p_string, "p");
-cmdline_parse_token_num_t cmd_fc_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_ls_result, pipeline_id, UINT32);
+static cmdline_parse_token_num_t cmd_flow_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_flow_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_fc_ls_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result,
- flow_string, "flow");
+static cmdline_parse_token_string_t cmd_flow_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, flow_string, "flow");
-cmdline_parse_token_string_t cmd_fc_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_flow_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_fc_ls = {
- .f = cmd_fc_ls_parsed,
+static cmdline_parse_inst_t cmd_flow = {
+ .f = cmd_flow_parsed,
.data = NULL,
- .help_str = "Flow list",
+ .help_str = "flow add / add bulk / add default / del / del default / ls",
.tokens = {
- (void *) &cmd_fc_ls_p_string,
- (void *) &cmd_fc_ls_pipeline_id,
- (void *) &cmd_fc_ls_flow_string,
- (void *) &cmd_fc_ls_ls_string,
+ (void *) &cmd_flow_p_string,
+ (void *) &cmd_flow_pipeline_id,
+ (void *) &cmd_flow_flow_string,
+ (void *) &cmd_flow_multi_string,
NULL,
},
};
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_fc_add_qinq,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv4_5tuple,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv6_5tuple,
-
- (cmdline_parse_inst_t *) &cmd_fc_del_qinq,
- (cmdline_parse_inst_t *) &cmd_fc_del_ipv4_5tuple,
- (cmdline_parse_inst_t *) &cmd_fc_del_ipv6_5tuple,
-
- (cmdline_parse_inst_t *) &cmd_fc_add_default,
- (cmdline_parse_inst_t *) &cmd_fc_del_default,
-
- (cmdline_parse_inst_t *) &cmd_fc_add_qinq_all,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv4_5tuple_all,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv6_5tuple_all,
-
- (cmdline_parse_inst_t *) &cmd_fc_ls,
+ (cmdline_parse_inst_t *) &cmd_flow,
NULL,
};
static struct pipeline_fe_ops pipeline_flow_classification_fe_ops = {
.f_init = app_pipeline_fc_init,
+ .f_post_init = NULL,
.f_free = app_pipeline_fc_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
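
Editor's note: the per-variant cmd_fc_* token tables are collapsed here into a single "flow" command; everything after "p <pipelineid> flow" is captured as one multi-token string, split with parse_tokenize_string(), and dispatched with strcmp() on the leading tokens. The standalone sketch below (plain C, no DPDK headers; split_tokens() is a hypothetical stand-in for parse_tokenize_string(), and the handlers only print) illustrates that tokenize-and-dispatch shape:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for parse_tokenize_string(): split a mutable
 * command tail into whitespace-separated tokens, fail if too many. */
static int
split_tokens(char *line, char **tokens, unsigned int *n_tokens)
{
        unsigned int max_tokens = *n_tokens, count = 0;
        char *tok = strtok(line, " \t");

        while (tok != NULL) {
                if (count == max_tokens)
                        return -1; /* too many arguments */
                tokens[count++] = tok;
                tok = strtok(NULL, " \t");
        }

        *n_tokens = count;
        return 0;
}

/* Shape of the new cmd_flow_parsed() handler: match sub-commands by hand. */
static void
flow_cmd(char *multi_string)
{
        char *tokens[16];
        unsigned int n_tokens = 16;

        if (split_tokens(multi_string, tokens, &n_tokens) != 0) {
                printf("too many arguments\n");
                return;
        }

        /* flow ls */
        if ((n_tokens == 1) && (strcmp(tokens[0], "ls") == 0)) {
                printf("listing flows\n");
                return;
        }

        /* flow add default <portid> */
        if ((n_tokens == 3) &&
                (strcmp(tokens[0], "add") == 0) &&
                (strcmp(tokens[1], "default") == 0)) {
                printf("adding default flow, port %s\n", tokens[2]);
                return;
        }

        printf("invalid command\n");
}

int
main(void)
{
        char cmd1[] = "ls";
        char cmd2[] = "add default 3";

        flow_cmd(cmd1); /* prints: listing flows */
        flow_cmd(cmd2); /* prints: adding default flow, port 3 */
        return 0;
}

Compared with one cmdline_parse_inst_t per command variant, a single token table now covers the whole "flow" family and argument validation happens in ordinary C, which is why pipeline_cmds[] shrinks to just cmd_flow.
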
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
index 9c775006..6c5ed384 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
@@ -102,6 +102,34 @@ int
app_pipeline_fc_del_default(struct app_params *app,
uint32_t pipeline_id);
+#ifndef APP_PIPELINE_FC_MAX_FLOWS_IN_FILE
+#define APP_PIPELINE_FC_MAX_FLOWS_IN_FILE (16 * 1024 * 1024)
+#endif
+
+int
+app_pipeline_fc_load_file_qinq(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
+int
+app_pipeline_fc_load_file_ipv4(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
+int
+app_pipeline_fc_load_file_ipv6(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
extern struct pipeline_type pipeline_flow_classification;
#endif
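
Editor's note: the declarations above expose the bulk-load helpers and cap the number of flows read from one file at 16 * 1024 * 1024. A minimal sketch of the calling sequence the "flow add ... bulk" handlers follow — allocate the key/port/flow arrays, load the file, push everything with one bulk call, free in reverse order — with the loader and bulk-add stubbed out (load_file(), add_bulk() and the "flows.txt" name are hypothetical stand-ins, and the array size is reduced for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define MAX_FLOWS 1024 /* APP_PIPELINE_FC_MAX_FLOWS_IN_FILE is 16 * 1024 * 1024 */

struct fc_key { uint8_t raw[64]; }; /* stand-in for struct pipeline_fc_key */

/* Hypothetical stub: parse the file, report keys read and the error line. */
static int
load_file(const char *name, struct fc_key *keys, uint32_t *ports,
        uint32_t *flows, uint32_t *n_keys, uint32_t *err_line)
{
        (void) name; (void) keys; (void) ports; (void) flows;
        *n_keys = 0;
        *err_line = 0;
        return 0;
}

/* Hypothetical stub: send all entries to the pipeline in one bulk request. */
static int
add_bulk(struct fc_key *keys, uint32_t *ports, uint32_t *flows,
        uint32_t n_keys)
{
        (void) keys; (void) ports; (void) flows; (void) n_keys;
        return 0;
}

int
main(void)
{
        uint32_t n_keys = MAX_FLOWS, err_line = 0;
        struct fc_key *keys = calloc(n_keys, sizeof(*keys));
        uint32_t *ports = malloc(n_keys * sizeof(*ports));
        uint32_t *flows = malloc(n_keys * sizeof(*flows));
        int status = -1;

        if ((keys == NULL) || (ports == NULL) || (flows == NULL))
                goto out;

        status = load_file("flows.txt", keys, ports, flows, &n_keys, &err_line);
        if (status != 0) {
                printf("file error at line %u\n", (unsigned) err_line);
                goto out;
        }

        status = add_bulk(keys, ports, flows, n_keys);
        if (status != 0)
                printf("bulk add failed\n");

out:
        /* free(NULL) is a no-op, so reverse-order cleanup is always safe */
        free(flows);
        free(ports);
        free(keys);
        return status ? 1 : 0;
}
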
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
index 70d976d5..8a762bc7 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -643,27 +643,6 @@ pipeline_fc_free(void *pipeline)
}
static int
-pipeline_fc_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_fc_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -807,5 +786,4 @@ struct pipeline_be_ops pipeline_flow_classification_be_ops = {
.f_free = pipeline_fc_free,
.f_run = NULL,
.f_timer = pipeline_fc_timer,
- .f_track = pipeline_fc_track,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master.c b/examples/ip_pipeline/pipeline/pipeline_master.c
index 1ccdad14..aab58a27 100644
--- a/examples/ip_pipeline/pipeline/pipeline_master.c
+++ b/examples/ip_pipeline/pipeline/pipeline_master.c
@@ -36,7 +36,9 @@
static struct pipeline_fe_ops pipeline_master_fe_ops = {
.f_init = NULL,
+ .f_post_init = NULL,
.f_free = NULL,
+ .f_track = NULL,
.cmds = NULL,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master_be.c b/examples/ip_pipeline/pipeline/pipeline_master_be.c
index ac0cbbc5..9a7c8c13 100644
--- a/examples/ip_pipeline/pipeline/pipeline_master_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_master_be.c
@@ -48,6 +48,7 @@
struct pipeline_master {
struct app_params *app;
struct cmdline *cl;
+ int post_init_done;
int script_file_done;
} __rte_cache_aligned;
@@ -77,6 +78,7 @@ pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
return NULL;
}
+ p->post_init_done = 0;
p->script_file_done = 0;
if (app->script_file == NULL)
p->script_file_done = 1;
@@ -102,8 +104,20 @@ static int
pipeline_run(void *pipeline)
{
struct pipeline_master *p = (struct pipeline_master *) pipeline;
+ struct app_params *app = p->app;
int status;
+#ifdef RTE_LIBRTE_KNI
+ uint32_t i;
+#endif /* RTE_LIBRTE_KNI */
+ /* Application post-init phase */
+ if (p->post_init_done == 0) {
+ app_post_init(app);
+
+ p->post_init_done = 1;
+ }
+
+ /* Run startup script file */
if (p->script_file_done == 0) {
struct app_params *app = p->app;
int fd = open(app->script_file, O_RDONLY);
@@ -124,6 +138,7 @@ pipeline_run(void *pipeline)
p->script_file_done = 1;
}
+ /* Command Line Interface (CLI) */
status = cmdline_poll(p->cl);
if (status < 0)
rte_panic("CLI poll error (%" PRId32 ")\n", status);
@@ -132,6 +147,12 @@ pipeline_run(void *pipeline)
rte_exit(0, "Bye!\n");
}
+#ifdef RTE_LIBRTE_KNI
+ /* Handle KNI requests from Linux kernel */
+ for (i = 0; i < app->n_pktq_kni; i++)
+ rte_kni_handle_request(app->kni[i]);
+#endif /* RTE_LIBRTE_KNI */
+
return 0;
}
@@ -146,5 +167,4 @@ struct pipeline_be_ops pipeline_master_be_ops = {
.f_free = pipeline_free,
.f_run = pipeline_run,
.f_timer = pipeline_timer,
- .f_track = NULL,
};
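
Editor's note: pipeline_run() on the master pipeline now performs a one-shot application post-init step (app_post_init()) before running the startup script and polling the CLI, and, when RTE_LIBRTE_KNI is enabled, services pending kernel requests for every KNI device on each iteration via rte_kni_handle_request(). A DPDK-free sketch of the one-shot flag pattern, with hypothetical names:

#include <stdio.h>

struct master_state {
        int post_init_done; /* run the post-init phase exactly once */
};

static void post_init(void) { printf("post-init phase\n"); }
static void poll_cli(void) { /* cmdline_poll() in the real code */ }

static int
run_iteration(struct master_state *s)
{
        if (s->post_init_done == 0) {
                post_init();
                s->post_init_done = 1;
        }

        poll_cli();
        return 0;
}

int
main(void)
{
        struct master_state s = { 0 };
        int i;

        for (i = 0; i < 3; i++)
                run_iteration(&s); /* post_init() fires only on the first pass */
        return 0;
}
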
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough.c b/examples/ip_pipeline/pipeline/pipeline_passthrough.c
index fc2cae5e..63ce1472 100644
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough.c
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough.c
@@ -34,9 +34,36 @@
#include "pipeline_passthrough.h"
#include "pipeline_passthrough_be.h"
+static int
+app_pipeline_passthrough_track(struct pipeline_params *p,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ struct pipeline_passthrough_params pp;
+ int status;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ status = pipeline_passthrough_parse_args(&pp, p);
+ if (status)
+ return -1;
+
+ if (pp.lb_hash_enabled)
+ return -1;
+
+ *port_out = port_in / (p->n_ports_in / p->n_ports_out);
+ return 0;
+}
+
static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
.f_init = NULL,
+ .f_post_init = NULL,
.f_free = NULL,
+ .f_track = app_pipeline_passthrough_track,
.cmds = NULL,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
index a0d11aea..6146a28f 100644
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
@@ -547,6 +547,18 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
"dma_src_mask", dma_mask_str);
}
+ if (p->lb_hash_enabled)
+ PIPELINE_ARG_CHECK((params->n_ports_out > 1),
+ "Parse error in section \"%s\": entry \"lb\" not "
+ "allowed for single output port pipeline",
+ params->name);
+ else
+ PIPELINE_ARG_CHECK(((params->n_ports_in >= params->n_ports_out)
+ && ((params->n_ports_in % params->n_ports_out) == 0)),
+ "Parse error in section \"%s\": n_ports_in needs to be "
+ "a multiple of n_ports_out (lb mode disabled)",
+ params->name);
+
return 0;
}
@@ -579,9 +591,7 @@ pipeline_passthrough_init(struct pipeline_params *params,
/* Check input arguments */
if ((params == NULL) ||
(params->n_ports_in == 0) ||
- (params->n_ports_out == 0) ||
- (params->n_ports_in < params->n_ports_out) ||
- (params->n_ports_in % params->n_ports_out))
+ (params->n_ports_out == 0))
return NULL;
/* Memory allocation */
@@ -702,10 +712,13 @@ pipeline_passthrough_init(struct pipeline_params *params,
/* Add entries to tables */
for (i = 0; i < p->n_ports_in; i++) {
+ uint32_t port_out_id = (p_pt->params.lb_hash_enabled == 0) ?
+ (i / (p->n_ports_in / p->n_ports_out)) :
+ 0;
+
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[
- i / (p->n_ports_in / p->n_ports_out)]},
+ {.port_id = p->port_out_id[port_out_id]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
@@ -780,25 +793,9 @@ pipeline_passthrough_timer(void *pipeline)
return 0;
}
-static int
-pipeline_passthrough_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- *port_out = port_in / p->n_ports_in;
- return 0;
-}
-
struct pipeline_be_ops pipeline_passthrough_be_ops = {
.f_init = pipeline_passthrough_init,
.f_free = pipeline_passthrough_free,
.f_run = NULL,
.f_timer = pipeline_passthrough_timer,
- .f_track = pipeline_passthrough_track,
};
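
Editor's note: with the load-balancing hash disabled, the passthrough pipeline now derives the output port as port_out = port_in / (n_ports_in / n_ports_out), and the argument parser enforces that n_ports_in is a non-zero multiple of n_ports_out; with the lb hash enabled, the default table entries fall back to output port 0 and the new front-end tracker refuses to track. The removed back-end tracker divided by n_ports_in, which always yielded output 0, so the mapping above also appears to correct that. A standalone check of the formula (the 4-in/2-out sizes are assumed, purely illustrative):

#include <stdio.h>

int
main(void)
{
        unsigned int n_ports_in = 4, n_ports_out = 2; /* assumed example sizes */
        unsigned int port_in;

        /* lb disabled: n_ports_in must be a non-zero multiple of n_ports_out */
        if ((n_ports_out == 0) || (n_ports_in % n_ports_out)) {
                printf("invalid port configuration\n");
                return 1;
        }

        for (port_in = 0; port_in < n_ports_in; port_in++) {
                unsigned int port_out = port_in / (n_ports_in / n_ports_out);

                printf("port_in %u -> port_out %u\n", port_in, port_out);
        }
        /* prints 0->0, 1->0, 2->1, 3->1 */
        return 0;
}
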
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.c b/examples/ip_pipeline/pipeline/pipeline_routing.c
index eab89f2e..3aadbf91 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,12 +34,11 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_routing.h"
+#include "parser.h"
struct app_pipeline_routing_route {
struct pipeline_routing_route_key key;
@@ -59,8 +58,14 @@ struct app_pipeline_routing_arp_entry {
struct pipeline_routing {
/* Parameters */
+ struct app_params *app;
+ uint32_t pipeline_id;
uint32_t n_ports_in;
uint32_t n_ports_out;
+ struct pipeline_routing_params rp;
+
+ /* Links */
+ uint32_t link_id[PIPELINE_MAX_PORT_OUT];
/* Routes */
TAILQ_HEAD(, app_pipeline_routing_route) routes;
@@ -79,12 +84,151 @@ struct pipeline_routing {
void *default_arp_entry_ptr;
};
+static int
+app_pipeline_routing_find_link(struct pipeline_routing *p,
+ uint32_t link_id,
+ uint32_t *port_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < p->n_ports_out; i++)
+ if (p->link_id[i] == link_id) {
+ *port_id = i;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void
+app_pipeline_routing_link_op(__rte_unused struct app_params *app,
+ uint32_t link_id,
+ uint32_t up,
+ void *arg)
+{
+ struct pipeline_routing_route_key key0, key1;
+ struct pipeline_routing *p = arg;
+ struct app_link_params *lp;
+ uint32_t port_id, netmask;
+ int status;
+
+ if (app == NULL)
+ return;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
+ if (lp == NULL)
+ return;
+
+ status = app_pipeline_routing_find_link(p,
+ link_id,
+ &port_id);
+ if (status)
+ return;
+
+ netmask = (~0U) << (32 - lp->depth);
+
+ /* Local network (directly attached network) */
+ key0.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key0.key.ipv4.ip = lp->ip & netmask;
+ key0.key.ipv4.depth = lp->depth;
+
+ /* Local termination */
+ key1.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key1.key.ipv4.ip = lp->ip;
+ key1.key.ipv4.depth = 32;
+
+ if (up) {
+ struct pipeline_routing_route_data data0, data1;
+
+ /* Local network (directly attached network) */
+ memset(&data0, 0, sizeof(data0));
+ data0.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
+ data0.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
+ data0.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ data0.l2.mpls.n_labels = 1;
+ }
+ data0.port_id = port_id;
+
+ if (p->rp.n_arp_entries)
+ app_pipeline_routing_add_route(app,
+ p->pipeline_id,
+ &key0,
+ &data0);
+
+ /* Local termination */
+ memset(&data1, 0, sizeof(data1));
+ data1.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
+ data1.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
+ data1.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ data1.l2.mpls.n_labels = 1;
+ }
+ data1.port_id = p->rp.port_local_dest;
+
+ app_pipeline_routing_add_route(app,
+ p->pipeline_id,
+ &key1,
+ &data1);
+ } else {
+ /* Local network (directly attached network) */
+ if (p->rp.n_arp_entries)
+ app_pipeline_routing_delete_route(app,
+ p->pipeline_id,
+ &key0);
+
+ /* Local termination */
+ app_pipeline_routing_delete_route(app,
+ p->pipeline_id,
+ &key1);
+ }
+}
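
Editor's note: when a link comes up, the handler above installs two routes derived from the link address and its prefix depth — one for the directly attached network (ip & netmask at the link's depth, only when n_arp_entries is non-zero) and a /32 local-termination route for the link address itself — with netmask = (~0U) << (32 - depth); the same routes are deleted when the link goes down. A quick standalone check with an assumed link address of 10.0.1.5/24:

#include <stdio.h>

int
main(void)
{
        unsigned int ip = (10u << 24) | (0u << 16) | (1u << 8) | 5u; /* 10.0.1.5 */
        unsigned int depth = 24;
        unsigned int netmask = (~0u) << (32 - depth);

        printf("netmask    = 0x%08x\n", netmask);                 /* 0xffffff00 */
        printf("local net  = 0x%08x /%u\n", ip & netmask, depth); /* 10.0.1.0/24 */
        printf("local host = 0x%08x /32\n", ip);                  /* 10.0.1.5/32 */
        return 0;
}
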
+
+static int
+app_pipeline_routing_set_link_op(
+ struct app_params *app,
+ struct pipeline_routing *p)
+{
+ uint32_t port_id;
+
+ for (port_id = 0; port_id < p->n_ports_out; port_id++) {
+ struct app_link_params *link;
+ uint32_t link_id;
+ int status;
+
+ link = app_pipeline_track_pktq_out_to_link(app,
+ p->pipeline_id,
+ port_id);
+ if (link == NULL)
+ continue;
+
+ link_id = link - app->link_params;
+ p->link_id[port_id] = link_id;
+
+ status = app_link_set_op(app,
+ link_id,
+ p->pipeline_id,
+ app_pipeline_routing_link_op,
+ (void *) p);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
static void *
-pipeline_routing_init(struct pipeline_params *params,
- __rte_unused void *arg)
+app_pipeline_routing_init(struct pipeline_params *params,
+ void *arg)
{
+ struct app_params *app = (struct app_params *) arg;
struct pipeline_routing *p;
- uint32_t size;
+ uint32_t pipeline_id, size;
+ int status;
/* Check input arguments */
if ((params == NULL) ||
@@ -92,6 +236,8 @@ pipeline_routing_init(struct pipeline_params *params,
(params->n_ports_out == 0))
return NULL;
+ APP_PARAM_GET_ID(params, "PIPELINE", pipeline_id);
+
/* Memory allocation */
size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
@@ -99,19 +245,40 @@ pipeline_routing_init(struct pipeline_params *params,
return NULL;
/* Initialization */
+ p->app = app;
+ p->pipeline_id = pipeline_id;
p->n_ports_in = params->n_ports_in;
p->n_ports_out = params->n_ports_out;
+ status = pipeline_routing_parse_args(&p->rp, params);
+ if (status) {
+ rte_free(p);
+ return NULL;
+ }
TAILQ_INIT(&p->routes);
p->n_routes = 0;
TAILQ_INIT(&p->arp_entries);
p->n_arp_entries = 0;
+ app_pipeline_routing_set_link_op(app, p);
+
return p;
}
static int
+app_pipeline_routing_post_init(void *pipeline)
+{
+ struct pipeline_routing *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ return app_pipeline_routing_set_macaddr(p->app, p->pipeline_id);
+}
+
+static int
app_pipeline_routing_free(void *pipeline)
{
struct pipeline_routing *p = pipeline;
@@ -198,7 +365,9 @@ print_route(const struct app_pipeline_routing_route *route)
key->depth,
route->data.port_id);
- if (route->data.flags & PIPELINE_ROUTING_ROUTE_ARP)
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ printf(", Local");
+ else if (route->data.flags & PIPELINE_ROUTING_ROUTE_ARP)
printf(
", Next Hop IP = %" PRIu32 ".%" PRIu32
".%" PRIu32 ".%" PRIu32,
@@ -383,8 +552,6 @@ app_pipeline_routing_add_route(struct app_params *app,
p->n_routes++;
}
- print_route(entry);
-
/* Message buffer free */
app_msg_free(app, rsp);
return 0;
@@ -677,8 +844,6 @@ app_pipeline_routing_add_arp_entry(struct app_params *app, uint32_t pipeline_id,
p->n_arp_entries++;
}
- print_arp_entry(entry);
-
/* Message buffer free */
app_msg_free(app, rsp);
return 0;
@@ -853,1382 +1018,600 @@ app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
return 0;
}
-static int
-parse_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+int
+app_pipeline_routing_set_macaddr(struct app_params *app,
+ uint32_t pipeline_id)
{
- uint32_t n_max_labels = *n_labels, count = 0;
-
- /* Check for void list of labels */
- if (strcmp(string, "<void>") == 0) {
- *n_labels = 0;
- return 0;
- }
+ struct app_pipeline_params *p;
+ struct pipeline_routing_set_macaddr_msg_req *req;
+ struct pipeline_routing_set_macaddr_msg_rsp *rsp;
+ uint32_t port_id;
- /* At least one label should be present */
- for ( ; (*string != '\0'); ) {
- char *next;
- int value;
+ /* Check input arguments */
+ if (app == NULL)
+ return -EINVAL;
- if (count >= n_max_labels)
- return -1;
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -EINVAL;
- if (count > 0) {
- if (string[0] != ':')
- return -1;
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -ENOMEM;
- string++;
- }
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_SET_MACADDR;
- value = strtol(string, &next, 10);
- if (next == string)
- return -1;
- string = next;
+ memset(req->macaddr, 0, sizeof(req->macaddr));
+ for (port_id = 0; port_id < p->n_pktq_out; port_id++) {
+ struct app_link_params *link;
- labels[count++] = (uint32_t) value;
+ link = app_pipeline_track_pktq_out_to_link(app,
+ pipeline_id,
+ port_id);
+ if (link)
+ req->macaddr[port_id] = link->mac_addr;
}
- *n_labels = count;
- return 0;
-}
-
-/*
- * route add (mpls = no, qinq = no, arp = no)
- */
-
-struct cmd_route_add1_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- struct ether_addr macaddr;
-};
-
-static void
-cmd_route_add1_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add1_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- int status;
-
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = 0;
- route_data.port_id = params->port;
- route_data.ethernet.macaddr = params->macaddr;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -ETIMEDOUT;
- if (status != 0) {
- printf("Command failed\n");
- return;
+ /* Read response and write entry */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return rsp->status;
}
-}
-
-static cmdline_parse_token_string_t cmd_route_add1_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add1_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_route_add1_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add1_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add1_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add1_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add1_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add1_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add1_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add1_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, ether_string,
- "ether");
-
-static cmdline_parse_token_etheraddr_t cmd_route_add1_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add1_result, macaddr);
-
-static cmdline_parse_inst_t cmd_route_add1 = {
- .f = cmd_route_add1_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = no, qinq = no, arp = no)",
- .tokens = {
- (void *)&cmd_route_add1_p_string,
- (void *)&cmd_route_add1_p,
- (void *)&cmd_route_add1_route_string,
- (void *)&cmd_route_add1_add_string,
- (void *)&cmd_route_add1_ip,
- (void *)&cmd_route_add1_depth,
- (void *)&cmd_route_add1_port_string,
- (void *)&cmd_route_add1_port,
- (void *)&cmd_route_add1_ether_string,
- (void *)&cmd_route_add1_macaddr,
- NULL,
- },
-};
-
-/*
- * route add (mpls = no, qinq = no, arp = yes)
- */
-
-struct cmd_route_add2_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- cmdline_ipaddr_t nh_ip;
-};
-
-static void
-cmd_route_add2_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add2_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- int status;
-
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_ARP;
- route_data.port_id = params->port;
- route_data.ethernet.ip =
- rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ /* Free response */
+ app_msg_free(app, rsp);
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
+ return 0;
}
-static cmdline_parse_token_string_t cmd_route_add2_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add2_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add2_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add2_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add2_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add2_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add2_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add2_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add2_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add2_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, ether_string,
- "ether");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add2_nh_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add2_result, nh_ip);
-
-static cmdline_parse_inst_t cmd_route_add2 = {
- .f = cmd_route_add2_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = no, qinq = no, arp = yes)",
- .tokens = {
- (void *)&cmd_route_add2_p_string,
- (void *)&cmd_route_add2_p,
- (void *)&cmd_route_add2_route_string,
- (void *)&cmd_route_add2_add_string,
- (void *)&cmd_route_add2_ip,
- (void *)&cmd_route_add2_depth,
- (void *)&cmd_route_add2_port_string,
- (void *)&cmd_route_add2_port,
- (void *)&cmd_route_add2_ether_string,
- (void *)&cmd_route_add2_nh_ip,
- NULL,
- },
-};
-
/*
- * route add (mpls = no, qinq = yes, arp = no)
+ * route
+ *
+ * route add (ARP = ON/OFF, MPLS = ON/OFF, QINQ = ON/OFF):
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> qinq <svlan> <cvlan>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> qinq <svlan> <cvlan>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> mpls <mpls labels>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> mpls <mpls labels>
+ *
+ * route add default:
+ * p <pipelineid> route add default <portid>
+ *
+ * route del:
+ * p <pipelineid> route del <ipaddr> <depth>
+ *
+ * route del default:
+ * p <pipelineid> route del default
+ *
+ * route ls:
+ * p <pipelineid> route ls
*/
-struct cmd_route_add3_result {
+struct cmd_route_result {
cmdline_fixed_string_t p_string;
uint32_t p;
cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- struct ether_addr macaddr;
- cmdline_fixed_string_t qinq_string;
- uint32_t svlan;
- uint32_t cvlan;
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_route_add3_parsed(
+cmd_route_parsed(
void *parsed_result,
__rte_unused struct cmdline *cl,
void *data)
{
- struct cmd_route_add3_result *params = parsed_result;
+ struct cmd_route_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- int status;
-
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_QINQ;
- route_data.port_id = params->port;
- route_data.ethernet.macaddr = params->macaddr;
- route_data.l2.qinq.svlan = params->svlan;
- route_data.l2.qinq.cvlan = params->cvlan;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
-
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
-}
-static cmdline_parse_token_string_t cmd_route_add3_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add3_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add3_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add3_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add3_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add3_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add3_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add3_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add3_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add3_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, ether_string,
- "ether");
-
-static cmdline_parse_token_etheraddr_t cmd_route_add3_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add3_result, macaddr);
-
-static cmdline_parse_token_string_t cmd_route_add3_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, qinq_string,
- "qinq");
-
-static cmdline_parse_token_num_t cmd_route_add3_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, svlan, UINT32);
-
-static cmdline_parse_token_num_t cmd_route_add3_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, cvlan, UINT32);
-
-static cmdline_parse_inst_t cmd_route_add3 = {
- .f = cmd_route_add3_parsed,
- .data = NULL,
- .help_str = "Route add (qinq = yes, arp = no)",
- .tokens = {
- (void *)&cmd_route_add3_p_string,
- (void *)&cmd_route_add3_p,
- (void *)&cmd_route_add3_route_string,
- (void *)&cmd_route_add3_add_string,
- (void *)&cmd_route_add3_ip,
- (void *)&cmd_route_add3_depth,
- (void *)&cmd_route_add3_port_string,
- (void *)&cmd_route_add3_port,
- (void *)&cmd_route_add3_ether_string,
- (void *)&cmd_route_add3_macaddr,
- (void *)&cmd_route_add3_qinq_string,
- (void *)&cmd_route_add3_svlan,
- (void *)&cmd_route_add3_cvlan,
- NULL,
- },
-};
-
-/*
- * route add (mpls = no, qinq = yes, arp = yes)
- */
-
-struct cmd_route_add4_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- cmdline_ipaddr_t nh_ip;
- cmdline_fixed_string_t qinq_string;
- uint32_t svlan;
- uint32_t cvlan;
-};
-
-static void
-cmd_route_add4_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add4_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
int status;
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_QINQ |
- PIPELINE_ROUTING_ROUTE_ARP;
- route_data.port_id = params->port;
- route_data.ethernet.ip =
- rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
- route_data.l2.qinq.svlan = params->svlan;
- route_data.l2.qinq.cvlan = params->cvlan;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
-
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
if (status != 0) {
- printf("Command failed\n");
+ printf(CMD_MSG_TOO_MANY_ARGS, "route");
return;
}
-}
-
-static cmdline_parse_token_string_t cmd_route_add4_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add4_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_route_add4_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add4_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add4_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add4_result, ip);
+ /* route add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ struct in_addr ipv4, nh_ipv4;
+ struct ether_addr mac_addr;
+ uint32_t depth, port_id, svlan, cvlan, i;
+ uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
+ uint32_t n_labels = RTE_DIM(mpls_labels);
+
+ memset(&key, 0, sizeof(key));
+ memset(&route_data, 0, sizeof(route_data));
+
+ if (n_tokens < 7) {
+ printf(CMD_MSG_NOT_ENOUGH_ARGS, "route add");
+ return;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, depth, UINT32);
+ if (parse_ipv4_addr(tokens[1], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
-static cmdline_parse_token_string_t cmd_route_add4_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, port_string,
- "port");
+ if (parser_read_uint32(&depth, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, port, UINT32);
+ if (strcmp(tokens[3], "port")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-static cmdline_parse_token_string_t cmd_route_add4_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, ether_string,
- "ether");
+ if (parser_read_uint32(&port_id, tokens[4])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-static cmdline_parse_token_ipaddr_t cmd_route_add4_nh_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add4_result, nh_ip);
+ if (strcmp(tokens[5], "ether")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "ether");
+ return;
+ }
-static cmdline_parse_token_string_t cmd_route_add4_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, qinq_string,
- "qinq");
+ if (parse_mac_addr(tokens[6], &mac_addr)) {
+ if (parse_ipv4_addr(tokens[6], &nh_ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "nhmacaddr or nhipaddr");
+ return;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, svlan, UINT32);
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_ARP;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, cvlan, UINT32);
+ if (n_tokens > 7) {
+ if (strcmp(tokens[7], "mpls") == 0) {
+ if (n_tokens != 9) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add mpls");
+ return;
+ }
+
+ if (parse_mpls_labels(tokens[8], mpls_labels, &n_labels)) {
+ printf(CMD_MSG_INVALID_ARG, "mpls labels");
+ return;
+ }
+
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ } else if (strcmp(tokens[7], "qinq") == 0) {
+ if (n_tokens != 10) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add qinq");
+ return;
+ }
+
+ if (parser_read_uint32(&svlan, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
+ if (parser_read_uint32(&cvlan, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
+
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ } else {
+ printf(CMD_MSG_ARG_NOT_FOUND, "mpls or qinq");
+ return;
+ }
+ }
-static cmdline_parse_inst_t cmd_route_add4 = {
- .f = cmd_route_add4_parsed,
- .data = NULL,
- .help_str = "Route add (qinq = yes, arp = yes)",
- .tokens = {
- (void *)&cmd_route_add4_p_string,
- (void *)&cmd_route_add4_p,
- (void *)&cmd_route_add4_route_string,
- (void *)&cmd_route_add4_add_string,
- (void *)&cmd_route_add4_ip,
- (void *)&cmd_route_add4_depth,
- (void *)&cmd_route_add4_port_string,
- (void *)&cmd_route_add4_port,
- (void *)&cmd_route_add4_ether_string,
- (void *)&cmd_route_add4_nh_ip,
- (void *)&cmd_route_add4_qinq_string,
- (void *)&cmd_route_add4_svlan,
- (void *)&cmd_route_add4_cvlan,
- NULL,
- },
-};
+ switch (route_data.flags) {
+ case 0:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ break;
-/*
- * route add (mpls = yes, qinq = no, arp = no)
- */
+ case PIPELINE_ROUTING_ROUTE_ARP:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ break;
-struct cmd_route_add5_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- struct ether_addr macaddr;
- cmdline_fixed_string_t mpls_string;
- cmdline_fixed_string_t mpls_labels;
-};
+ case PIPELINE_ROUTING_ROUTE_MPLS:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+ break;
-static void
-cmd_route_add5_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add5_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
- uint32_t n_labels = RTE_DIM(mpls_labels);
- uint32_t i;
- int status;
+ case PIPELINE_ROUTING_ROUTE_MPLS | PIPELINE_ROUTING_ROUTE_ARP:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+ break;
- /* Parse MPLS labels */
- status = parse_labels(params->mpls_labels, mpls_labels, &n_labels);
- if (status) {
- printf("MPLS labels parse error\n");
- return;
- }
+ case PIPELINE_ROUTING_ROUTE_QINQ:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ route_data.l2.qinq.svlan = svlan;
+ route_data.l2.qinq.cvlan = cvlan;
+ break;
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
+ case PIPELINE_ROUTING_ROUTE_QINQ | PIPELINE_ROUTING_ROUTE_ARP:
+ default:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ route_data.l2.qinq.svlan = svlan;
+ route_data.l2.qinq.cvlan = cvlan;
+ break;
+ }
- route_data.flags = PIPELINE_ROUTING_ROUTE_MPLS;
- route_data.port_id = params->port;
- route_data.ethernet.macaddr = params->macaddr;
- for (i = 0; i < n_labels; i++)
- route_data.l2.mpls.labels[i] = mpls_labels[i];
- route_data.l2.mpls.n_labels = n_labels;
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.depth = depth;
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route add");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_add5_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add5_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add5_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add5_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add5_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add5_result, ip);
+ } /* route add */
-static cmdline_parse_token_num_t cmd_route_add5_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, depth, UINT32);
+ /* route add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-static cmdline_parse_token_string_t cmd_route_add5_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add5_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add5_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, ether_string,
- "ether");
-
-static cmdline_parse_token_etheraddr_t cmd_route_add5_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add5_result, macaddr);
-
-static cmdline_parse_token_string_t cmd_route_add5_mpls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, mpls_string,
- "mpls");
-
-static cmdline_parse_token_string_t cmd_route_add5_mpls_labels =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, mpls_labels,
- NULL);
-
-static cmdline_parse_inst_t cmd_route_add5 = {
- .f = cmd_route_add5_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = yes, arp = no)",
- .tokens = {
- (void *)&cmd_route_add5_p_string,
- (void *)&cmd_route_add5_p,
- (void *)&cmd_route_add5_route_string,
- (void *)&cmd_route_add5_add_string,
- (void *)&cmd_route_add5_ip,
- (void *)&cmd_route_add5_depth,
- (void *)&cmd_route_add5_port_string,
- (void *)&cmd_route_add5_port,
- (void *)&cmd_route_add5_ether_string,
- (void *)&cmd_route_add5_macaddr,
- (void *)&cmd_route_add5_mpls_string,
- (void *)&cmd_route_add5_mpls_labels,
- NULL,
- },
-};
-
-/*
- * route add (mpls = yes, qinq = no, arp = yes)
- */
-
-struct cmd_route_add6_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- cmdline_ipaddr_t nh_ip;
- cmdline_fixed_string_t mpls_string;
- cmdline_fixed_string_t mpls_labels;
-};
-
-static void
-cmd_route_add6_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add6_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
- uint32_t n_labels = RTE_DIM(mpls_labels);
- uint32_t i;
- int status;
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add default");
+ return;
+ }
- /* Parse MPLS labels */
- status = parse_labels(params->mpls_labels, mpls_labels, &n_labels);
- if (status) {
- printf("MPLS labels parse error\n");
- return;
- }
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_MPLS |
- PIPELINE_ROUTING_ROUTE_ARP;
- route_data.port_id = params->port;
- route_data.ethernet.ip =
- rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
- for (i = 0; i < n_labels; i++)
- route_data.l2.mpls.labels[i] = mpls_labels[i];
- route_data.l2.mpls.n_labels = n_labels;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ status = app_pipeline_routing_add_default_route(app,
+ params->p,
+ port_id);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route add default");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_add6_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add6_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add6_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add6_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add6_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add6_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add6_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add6_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, port_string,
- "port");
+ } /* route add default */
-static cmdline_parse_token_num_t cmd_route_add6_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, port, UINT32);
+ /* route del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_route_key key;
+ struct in_addr ipv4;
+ uint32_t depth;
-static cmdline_parse_token_string_t cmd_route_add6_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, ether_string,
- "ether");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add6_nh_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add6_result, nh_ip);
-
-static cmdline_parse_token_string_t cmd_route_add6_mpls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, mpls_string,
- "mpls");
-
-static cmdline_parse_token_string_t cmd_route_add6_mpls_labels =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, mpls_labels,
- NULL);
-
-static cmdline_parse_inst_t cmd_route_add6 = {
- .f = cmd_route_add6_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = yes, arp = yes)",
- .tokens = {
- (void *)&cmd_route_add6_p_string,
- (void *)&cmd_route_add6_p,
- (void *)&cmd_route_add6_route_string,
- (void *)&cmd_route_add6_add_string,
- (void *)&cmd_route_add6_ip,
- (void *)&cmd_route_add6_depth,
- (void *)&cmd_route_add6_port_string,
- (void *)&cmd_route_add6_port,
- (void *)&cmd_route_add6_ether_string,
- (void *)&cmd_route_add6_nh_ip,
- (void *)&cmd_route_add6_mpls_string,
- (void *)&cmd_route_add6_mpls_labels,
- NULL,
- },
-};
+ memset(&key, 0, sizeof(key));
-/*
- * route del
- */
-
-struct cmd_route_del_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t del_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
-};
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route del");
+ return;
+ }
-static void
-cmd_route_del_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_del_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
+ if (parse_ipv4_addr(tokens[1], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- int status;
+ if (parser_read_uint32(&depth, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.depth = depth;
- status = app_pipeline_routing_delete_route(app, params->p, &key);
+ status = app_pipeline_routing_delete_route(app, params->p, &key);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route del");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_del_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_del_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_del_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_del_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_del_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, del_string,
- "del");
-
-static cmdline_parse_token_ipaddr_t cmd_route_del_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_del_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_del_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_del_result, depth, UINT32);
-
-static cmdline_parse_inst_t cmd_route_del = {
- .f = cmd_route_del_parsed,
- .data = NULL,
- .help_str = "Route delete",
- .tokens = {
- (void *)&cmd_route_del_p_string,
- (void *)&cmd_route_del_p,
- (void *)&cmd_route_del_route_string,
- (void *)&cmd_route_del_del_string,
- (void *)&cmd_route_del_ip,
- (void *)&cmd_route_del_depth,
- NULL,
- },
-};
-
-/*
- * route add default
- */
-
-struct cmd_route_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint32_t port;
-};
-
-static void
-cmd_route_add_default_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ } /* route del */
+
+ /* route del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route del default");
+ return;
+ }
- status = app_pipeline_routing_add_default_route(app, params->p,
- params->port);
+ status = app_pipeline_routing_delete_default_route(app,
+ params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route del default");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result, p_string,
- "p");
+ } /* route del default */
-static cmdline_parse_token_num_t cmd_route_add_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add_default_result, p, UINT32);
-
-cmdline_parse_token_string_t cmd_route_add_default_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
- route_string, "route");
-
-cmdline_parse_token_string_t cmd_route_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
- add_string, "add");
-
-cmdline_parse_token_string_t cmd_route_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
- default_string, "default");
-
-cmdline_parse_token_num_t cmd_route_add_default_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add_default_result,
- port, UINT32);
-
-cmdline_parse_inst_t cmd_route_add_default = {
- .f = cmd_route_add_default_parsed,
- .data = NULL,
- .help_str = "Route default set",
- .tokens = {
- (void *)&cmd_route_add_default_p_string,
- (void *)&cmd_route_add_default_p,
- (void *)&cmd_route_add_default_route_string,
- (void *)&cmd_route_add_default_add_string,
- (void *)&cmd_route_add_default_default_string,
- (void *)&cmd_route_add_default_port,
- NULL,
- },
-};
-
-/*
- * route del default
- */
-
-struct cmd_route_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
-
-static void
-cmd_route_del_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_del_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ /* route ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route ls");
+ return;
+ }
- status = app_pipeline_routing_delete_default_route(app, params->p);
+ status = app_pipeline_routing_route_ls(app, params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route ls");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_del_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_del_default_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_del_default_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
- route_string, "route");
-
-static cmdline_parse_token_string_t cmd_route_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
- del_string, "del");
-
-static cmdline_parse_token_string_t cmd_route_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
- default_string, "default");
-
-
-static cmdline_parse_inst_t cmd_route_del_default = {
- .f = cmd_route_del_default_parsed,
- .data = NULL,
- .help_str = "Route default clear",
- .tokens = {
- (void *)&cmd_route_del_default_p_string,
- (void *)&cmd_route_del_default_p,
- (void *)&cmd_route_del_default_route_string,
- (void *)&cmd_route_del_default_del_string,
- (void *)&cmd_route_del_default_default_string,
- NULL,
- },
-};
-
-/*
- * route ls
- */
-
-struct cmd_route_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_route_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_routing_route_ls(app, params->p);
+ } /* route ls */
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
+ printf(CMD_MSG_MISMATCH_ARGS, "route");
}
-static cmdline_parse_token_string_t cmd_route_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result, p_string, "p");
+static cmdline_parse_token_string_t cmd_route_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, p_string, "p");
-static cmdline_parse_token_num_t cmd_route_ls_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_ls_result, p, UINT32);
+static cmdline_parse_token_num_t cmd_route_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_route_ls_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result,
- route_string, "route");
+static cmdline_parse_token_string_t cmd_route_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, route_string, "route");
-static cmdline_parse_token_string_t cmd_route_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_route_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, multi_string,
+ TOKEN_STRING_MULTI);
-static cmdline_parse_inst_t cmd_route_ls = {
- .f = cmd_route_ls_parsed,
+static cmdline_parse_inst_t cmd_route = {
+ .f = cmd_route_parsed,
.data = NULL,
- .help_str = "Route list",
+ .help_str = "route add / add default / del / del default / ls",
.tokens = {
- (void *)&cmd_route_ls_p_string,
- (void *)&cmd_route_ls_p,
- (void *)&cmd_route_ls_route_string,
- (void *)&cmd_route_ls_ls_string,
+ (void *)&cmd_route_p_string,
+ (void *)&cmd_route_p,
+ (void *)&cmd_route_route_string,
+ (void *)&cmd_route_multi_string,
NULL,
},
};
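/*
 * Editor's note (illustrative usage, not part of this patch): the consolidated
 * "route" command parsed above accepts lines such as the following, where the
 * pipeline id, addresses and VLAN/MPLS values are made-up examples:
 *
 *   p 1 route add 10.0.0.0 24 port 0 ether a0:b0:c0:d0:e0:f0
 *   p 1 route add 10.1.0.0 16 port 1 ether 192.168.1.1 qinq 100 200
 *   p 1 route add default 2
 *   p 1 route del 10.0.0.0 24
 *   p 1 route ls
 *
 * Per cmd_route_parsed() above, the argument after "ether" may be either a
 * next-hop MAC address or (with ARP enabled) a next-hop IPv4 address, and the
 * optional trailing "mpls <labels>" / "qinq <svlan> <cvlan>" selects the
 * encapsulation.
 */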
/*
- * arp add
+ * arp
+ *
+ * arp add:
+ * p <pipelineid> arp add <portid> <ipaddr> <macaddr>
+ *
+ * arp add default:
+ * p <pipelineid> arp add default <portid>
+ *
+ * arp del:
+ * p <pipelineid> arp del <portid> <ipaddr>
+ *
+ * arp del default:
+ * p <pipelineid> arp del default
+ *
+ * arp ls:
+ * p <pipelineid> arp ls
*/
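/*
 * Editor's note (illustrative usage, not part of this patch): concrete
 * instances of the grammar documented above, with made-up ids and addresses:
 *
 *   p 1 arp add 0 10.0.0.1 a0:b0:c0:d0:e0:f0
 *   p 1 arp add default 2
 *   p 1 arp del 0 10.0.0.1
 *   p 1 arp del default
 *   p 1 arp ls
 */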
-struct cmd_arp_add_result {
+struct cmd_arp_result {
cmdline_fixed_string_t p_string;
uint32_t p;
cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t add_string;
- uint32_t port_id;
- cmdline_ipaddr_t ip;
- struct ether_addr macaddr;
-
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_arp_add_parsed(
+cmd_arp_parsed(
void *parsed_result,
__rte_unused struct cmdline *cl,
void *data)
{
- struct cmd_arp_add_result *params = parsed_result;
+ struct cmd_arp_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_routing_arp_key key;
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
int status;
- key.type = PIPELINE_ROUTING_ARP_IPV4;
- key.key.ipv4.port_id = params->port_id;
- key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
-
- status = app_pipeline_routing_add_arp_entry(app,
- params->p,
- &key,
- &params->macaddr);
-
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
if (status != 0) {
- printf("Command failed\n");
+ printf(CMD_MSG_TOO_MANY_ARGS, "arp");
return;
}
-}
-
-static cmdline_parse_token_string_t cmd_arp_add_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_arp_add_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_add_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, arp_string, "arp");
-static cmdline_parse_token_string_t cmd_arp_add_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, add_string, "add");
+ /* arp add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_arp_key key;
+ struct in_addr ipv4;
+ struct ether_addr mac_addr;
+ uint32_t port_id;
-static cmdline_parse_token_num_t cmd_arp_add_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, port_id, UINT32);
-
-static cmdline_parse_token_ipaddr_t cmd_arp_add_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_arp_add_result, ip);
-
-static cmdline_parse_token_etheraddr_t cmd_arp_add_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_arp_add_result, macaddr);
-
-static cmdline_parse_inst_t cmd_arp_add = {
- .f = cmd_arp_add_parsed,
- .data = NULL,
- .help_str = "ARP add",
- .tokens = {
- (void *)&cmd_arp_add_p_string,
- (void *)&cmd_arp_add_p,
- (void *)&cmd_arp_add_arp_string,
- (void *)&cmd_arp_add_add_string,
- (void *)&cmd_arp_add_port_id,
- (void *)&cmd_arp_add_ip,
- (void *)&cmd_arp_add_macaddr,
- NULL,
- },
-};
+ memset(&key, 0, sizeof(key));
-/*
- * arp del
- */
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp add");
+ return;
+ }
-struct cmd_arp_del_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t del_string;
- uint32_t port_id;
- cmdline_ipaddr_t ip;
-};
+ if (parser_read_uint32(&port_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-static void
-cmd_arp_del_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_del_result *params = parsed_result;
- struct app_params *app = data;
+ if (parse_ipv4_addr(tokens[2], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- struct pipeline_routing_arp_key key;
- int status;
+ if (parse_mac_addr(tokens[3], &mac_addr)) {
+ printf(CMD_MSG_INVALID_ARG, "macaddr");
+ return;
+ }
- key.type = PIPELINE_ROUTING_ARP_IPV4;
- key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
- key.key.ipv4.port_id = params->port_id;
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.port_id = port_id;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
- status = app_pipeline_routing_delete_arp_entry(app, params->p, &key);
+ status = app_pipeline_routing_add_arp_entry(app,
+ params->p,
+ &key,
+ &mac_addr);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp add");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_arp_del_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, p_string,
- "p");
+ } /* arp add */
-static cmdline_parse_token_num_t cmd_arp_del_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_del_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "arp");
-
-static cmdline_parse_token_string_t cmd_arp_del_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, del_string, "del");
-
-static cmdline_parse_token_num_t cmd_arp_del_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, port_id, UINT32);
-
-static cmdline_parse_token_ipaddr_t cmd_arp_del_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_arp_del_result, ip);
-
-static cmdline_parse_inst_t cmd_arp_del = {
- .f = cmd_arp_del_parsed,
- .data = NULL,
- .help_str = "ARP delete",
- .tokens = {
- (void *)&cmd_arp_del_p_string,
- (void *)&cmd_arp_del_p,
- (void *)&cmd_arp_del_arp_string,
- (void *)&cmd_arp_del_del_string,
- (void *)&cmd_arp_del_port_id,
- (void *)&cmd_arp_del_ip,
- NULL,
- },
-};
-
-/*
- * arp add default
- */
-
-struct cmd_arp_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint32_t port_id;
-};
+ /* arp add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-static void
-cmd_arp_add_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_add_default_result *params = parsed_result;
- struct app_params *app = data;
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp add default");
+ return;
+ }
- int status;
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- status = app_pipeline_routing_add_default_arp_entry(app,
- params->p,
- params->port_id);
+ status = app_pipeline_routing_add_default_arp_entry(app,
+ params->p,
+ port_id);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp add default");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_arp_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_arp_add_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_default_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_add_default_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, arp_string,
- "arp");
-
-static cmdline_parse_token_string_t cmd_arp_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, add_string,
- "add");
+ } /* arp add default */
-static cmdline_parse_token_string_t cmd_arp_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result,
- default_string, "default");
+ /* arp del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_arp_key key;
+ struct in_addr ipv4;
+ uint32_t port_id;
-static cmdline_parse_token_num_t cmd_arp_add_default_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_default_result, port_id,
- UINT32);
+ memset(&key, 0, sizeof(key));
-static cmdline_parse_inst_t cmd_arp_add_default = {
- .f = cmd_arp_add_default_parsed,
- .data = NULL,
- .help_str = "ARP add default",
- .tokens = {
- (void *)&cmd_arp_add_default_p_string,
- (void *)&cmd_arp_add_default_p,
- (void *)&cmd_arp_add_default_arp_string,
- (void *)&cmd_arp_add_default_add_string,
- (void *)&cmd_arp_add_default_default_string,
- (void *)&cmd_arp_add_default_port_id,
- NULL,
- },
-};
-
-/*
- * arp del default
- */
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp del");
+ return;
+ }
-struct cmd_arp_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
+ if (parser_read_uint32(&port_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-static void
-cmd_arp_del_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_del_default_result *params = parsed_result;
- struct app_params *app = data;
+ if (parse_ipv4_addr(tokens[2], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- int status;
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.port_id = port_id;
- status = app_pipeline_routing_delete_default_arp_entry(app, params->p);
+ status = app_pipeline_routing_delete_arp_entry(app,
+ params->p,
+ &key);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp del");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_arp_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_arp_del_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_del_default_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_del_default_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, arp_string,
- "arp");
-
-static cmdline_parse_token_string_t cmd_arp_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, del_string,
- "del");
-
-static cmdline_parse_token_string_t cmd_arp_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result,
- default_string, "default");
-
-static cmdline_parse_inst_t cmd_arp_del_default = {
- .f = cmd_arp_del_default_parsed,
- .data = NULL,
- .help_str = "ARP delete default",
- .tokens = {
- (void *)&cmd_arp_del_default_p_string,
- (void *)&cmd_arp_del_default_p,
- (void *)&cmd_arp_del_default_arp_string,
- (void *)&cmd_arp_del_default_del_string,
- (void *)&cmd_arp_del_default_default_string,
- NULL,
- },
-};
+ } /* arp del */
+
+ /* arp del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp del default");
+ return;
+ }
+
+ status = app_pipeline_routing_delete_default_arp_entry(app,
+ params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp del default");
+
+ return;
+ } /* arp del default */
+
+ /* arp ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp ls");
+ return;
+ }
-/*
- * arp ls
- */
+ status = app_pipeline_routing_arp_ls(app, params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp ls");
-struct cmd_arp_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_arp_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_ls_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing *p;
-
- p = app_pipeline_data_fe(app, params->p, &pipeline_routing);
- if (p == NULL)
return;
+ } /* arp ls */
- app_pipeline_routing_arp_ls(app, params->p);
+ printf(CMD_MSG_FAIL, "arp");
}
-static cmdline_parse_token_string_t cmd_arp_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, p_string,
- "p");
+static cmdline_parse_token_string_t cmd_arp_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, p_string, "p");
-static cmdline_parse_token_num_t cmd_arp_ls_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_ls_result, p, UINT32);
+static cmdline_parse_token_num_t cmd_arp_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_arp_ls_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, arp_string,
- "arp");
+static cmdline_parse_token_string_t cmd_arp_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, arp_string, "arp");
-static cmdline_parse_token_string_t cmd_arp_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_arp_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, multi_string,
+ TOKEN_STRING_MULTI);
-static cmdline_parse_inst_t cmd_arp_ls = {
- .f = cmd_arp_ls_parsed,
+static cmdline_parse_inst_t cmd_arp = {
+ .f = cmd_arp_parsed,
.data = NULL,
- .help_str = "ARP list",
+ .help_str = "arp add / add default / del / del default / ls",
.tokens = {
- (void *)&cmd_arp_ls_p_string,
- (void *)&cmd_arp_ls_p,
- (void *)&cmd_arp_ls_arp_string,
- (void *)&cmd_arp_ls_ls_string,
+ (void *)&cmd_arp_p_string,
+ (void *)&cmd_arp_p,
+ (void *)&cmd_arp_arp_string,
+ (void *)&cmd_arp_multi_string,
NULL,
},
};
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *)&cmd_route_add1,
- (cmdline_parse_inst_t *)&cmd_route_add2,
- (cmdline_parse_inst_t *)&cmd_route_add3,
- (cmdline_parse_inst_t *)&cmd_route_add4,
- (cmdline_parse_inst_t *)&cmd_route_add5,
- (cmdline_parse_inst_t *)&cmd_route_add6,
- (cmdline_parse_inst_t *)&cmd_route_del,
- (cmdline_parse_inst_t *)&cmd_route_add_default,
- (cmdline_parse_inst_t *)&cmd_route_del_default,
- (cmdline_parse_inst_t *)&cmd_route_ls,
- (cmdline_parse_inst_t *)&cmd_arp_add,
- (cmdline_parse_inst_t *)&cmd_arp_del,
- (cmdline_parse_inst_t *)&cmd_arp_add_default,
- (cmdline_parse_inst_t *)&cmd_arp_del_default,
- (cmdline_parse_inst_t *)&cmd_arp_ls,
+ (cmdline_parse_inst_t *)&cmd_route,
+ (cmdline_parse_inst_t *)&cmd_arp,
NULL,
};
static struct pipeline_fe_ops pipeline_routing_fe_ops = {
- .f_init = pipeline_routing_init,
+ .f_init = app_pipeline_routing_init,
+ .f_post_init = app_pipeline_routing_post_init,
.f_free = app_pipeline_routing_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.h b/examples/ip_pipeline/pipeline/pipeline_routing.h
index fa41642b..0197449b 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing.h
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.h
@@ -86,6 +86,13 @@ app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
uint32_t pipeline_id);
/*
+ * SETTINGS
+ */
+int
+app_pipeline_routing_set_macaddr(struct app_params *app,
+ uint32_t pipeline_id);
+
+/*
* Pipeline type
*/
extern struct pipeline_type pipeline_routing;
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
index bc5bf7a5..21ac7888 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
@@ -65,7 +65,9 @@
((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
-#define MAC_SRC_DEFAULT 0x112233445566ULL
+/* Network Byte Order (NBO) */
+#define SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr, ethertype) \
+ (((uint64_t) macaddr) | (((uint64_t) rte_cpu_to_be_16(ethertype)) << 48))
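/*
 * Editor's note (worked example, not part of this patch): on a little-endian
 * CPU with ethertype 0x0800 (IPv4), rte_cpu_to_be_16(0x0800) == 0x0008, so the
 * macro ORs 0x0008000000000000 into the upper 16 bits of the 64-bit slab,
 * while the macaddr argument (per the NBO comment above, already in network
 * byte order) is carried unchanged in the remaining bits. The callers below
 * now take that macaddr from the per-port macaddr[] array instead of the
 * removed MAC_SRC_DEFAULT constant.
 */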
#ifndef PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s
#define PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s 256
@@ -75,6 +77,7 @@ struct pipeline_routing {
struct pipeline p;
struct pipeline_routing_params params;
pipeline_msg_req_handler custom_handlers[PIPELINE_ROUTING_MSG_REQS];
+ uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
} __rte_cache_aligned;
/*
@@ -132,6 +135,10 @@ static void *
pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p,
void *msg);
+static void *
+pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p,
+ void *msg);
+
static pipeline_msg_req_handler custom_handlers[] = {
[PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD] =
pipeline_routing_msg_req_route_add_handler,
@@ -149,6 +156,8 @@ static pipeline_msg_req_handler custom_handlers[] = {
pipeline_routing_msg_req_arp_add_default_handler,
[PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT] =
pipeline_routing_msg_req_arp_del_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_SET_MACADDR] =
+ pipeline_routing_msg_req_set_macaddr_handler,
};
/*
@@ -921,6 +930,7 @@ pipeline_routing_parse_args(struct pipeline_routing_params *p,
struct pipeline_params *params)
{
uint32_t n_routes_present = 0;
+ uint32_t port_local_dest_present = 0;
uint32_t encap_present = 0;
uint32_t qinq_sched_present = 0;
uint32_t mpls_color_mark_present = 0;
@@ -933,6 +943,7 @@ pipeline_routing_parse_args(struct pipeline_routing_params *p,
/* default values */
p->n_routes = PIPELINE_ROUTING_N_ROUTES_DEFAULT;
+ p->port_local_dest = params->n_ports_out - 1;
p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
p->qinq_sched = 0;
p->mpls_color_mark = 0;
@@ -962,6 +973,23 @@ pipeline_routing_parse_args(struct pipeline_routing_params *p,
continue;
}
+ /* port_local_dest */
+ if (strcmp(arg_name, "port_local_dest") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ port_local_dest_present == 0, params->name,
+ arg_name);
+ port_local_dest_present = 1;
+
+ status = parser_read_uint32(&p->port_local_dest,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (p->port_local_dest < params->n_ports_out)),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
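/*
 * Editor's note (illustrative, not part of this patch): in the application
 * configuration file this new argument would sit in the routing pipeline
 * section, e.g.
 *
 *   [PIPELINE1]
 *   type = ROUTING
 *   port_local_dest = 3
 *
 * where the section name and the value 3 are made-up examples; the value must
 * be a valid output port index (< n_ports_out), as checked above.
 */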
/* encap */
if (strcmp(arg_name, "encap") == 0) {
@@ -1419,27 +1447,6 @@ pipeline_routing_free(void *pipeline)
}
static int
-pipeline_routing_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_routing_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -1534,7 +1541,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether - ARP off */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
(p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t macaddr_dst;
uint64_t ethertype = ETHER_TYPE_IPv4;
@@ -1542,7 +1549,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
macaddr_dst = rte_bswap64(macaddr_dst << 16);
entry_arp0.slab[0] =
- rte_bswap64((macaddr_src << 16) | ethertype);
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
entry_arp0.slab[1] = rte_bswap64(macaddr_dst);
@@ -1556,11 +1563,11 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether - ARP on */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t ethertype = ETHER_TYPE_IPv4;
- entry_arp1.slab[0] = rte_bswap64((macaddr_src << 16) |
- ethertype);
+ entry_arp1.slab[0] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
entry_arp1.data_offset = entry_arp1.slab_offset[0] - 6
@@ -1571,7 +1578,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether QinQ - ARP off */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
(p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t macaddr_dst;
uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
uint64_t ethertype_vlan = 0x8100;
@@ -1588,8 +1595,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
ethertype_ipv4);
entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
- entry_arp0.slab[1] = rte_bswap64((macaddr_src << 16) |
- ethertype_qinq);
+ entry_arp0.slab[1] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
entry_arp0.slab[2] = rte_bswap64(macaddr_dst);
@@ -1603,7 +1610,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether QinQ - ARP on */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
uint64_t ethertype_vlan = 0x8100;
uint64_t ethertype_qinq = 0x9100;
@@ -1616,8 +1623,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
ethertype_ipv4);
entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
- entry_arp1.slab[1] = rte_bswap64((macaddr_src << 16) |
- ethertype_qinq);
+ entry_arp1.slab[1] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
entry_arp1.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
entry_arp1.data_offset = entry_arp1.slab_offset[1] - 6
@@ -1628,7 +1635,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether MPLS - ARP off */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
(p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t macaddr_dst;
uint64_t ethertype_mpls = 0x8847;
@@ -1697,8 +1704,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
return rsp;
}
- entry_arp0.slab[2] = rte_bswap64((macaddr_src << 16) |
- ethertype_mpls);
+ entry_arp0.slab[2] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset -
(n_labels * 4 + 8);
@@ -1714,7 +1721,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether MPLS - ARP on */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t ethertype_mpls = 0x8847;
uint64_t label0 = req->data.l2.mpls.labels[0];
@@ -1779,8 +1786,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
return rsp;
}
- entry_arp1.slab[2] = rte_bswap64((macaddr_src << 16) |
- ethertype_mpls);
+ entry_arp1.slab[2] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
entry_arp1.slab_offset[2] = p_rt->params.ip_hdr_offset -
(n_labels * 4 + 8);
@@ -1961,10 +1968,25 @@ pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p, void *msg)
return rsp;
}
+void *
+pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
+ struct pipeline_routing_set_macaddr_msg_req *req = msg;
+ struct pipeline_routing_set_macaddr_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ for (port_id = 0; port_id < p->n_ports_out; port_id++)
+ p_rt->macaddr[port_id] = req->macaddr[port_id];
+
+ rsp->status = 0;
+
+ return rsp;
+}
+
struct pipeline_be_ops pipeline_routing_be_ops = {
.f_init = pipeline_routing_init,
.f_free = pipeline_routing_free,
.f_run = NULL,
.f_timer = pipeline_routing_timer,
- .f_track = pipeline_routing_track,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.h b/examples/ip_pipeline/pipeline/pipeline_routing_be.h
index ec767b24..12763427 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.h
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.h
@@ -54,6 +54,7 @@ enum pipeline_routing_encap {
struct pipeline_routing_params {
/* routing */
uint32_t n_routes;
+ uint32_t port_local_dest;
/* routing packet encapsulation */
enum pipeline_routing_encap encap;
@@ -160,6 +161,7 @@ enum pipeline_routing_msg_req_type {
PIPELINE_ROUTING_MSG_REQ_ARP_DEL,
PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT,
PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_SET_MACADDR,
PIPELINE_ROUTING_MSG_REQS
};
@@ -291,6 +293,20 @@ struct pipeline_routing_arp_delete_default_msg_rsp {
int status;
};
+/*
+ * MSG SET MACADDR
+ */
+struct pipeline_routing_set_macaddr_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
+};
+
+struct pipeline_routing_set_macaddr_msg_rsp {
+ int status;
+};
+
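/*
 * Editor's sketch (assumptions, not part of this patch): a front end could
 * fill the new request roughly as follows before sending it to the pipeline;
 * PIPELINE_MSG_REQ_CUSTOM is assumed to be the generic custom-message type
 * that reaches the custom_handlers[] table shown in the .c hunks above, and
 * the MAC values are presumably stored in network byte order (see the
 * SLAB_NBO_* macro).
 *
 *   struct pipeline_routing_set_macaddr_msg_req *req = ...;
 *   uint32_t i;
 *
 *   req->type = PIPELINE_MSG_REQ_CUSTOM;
 *   req->subtype = PIPELINE_ROUTING_MSG_REQ_SET_MACADDR;
 *   for (i = 0; i < PIPELINE_MAX_PORT_OUT; i++)
 *       req->macaddr[i] = mac_of_port(i);   // hypothetical helper
 */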
extern struct pipeline_be_ops pipeline_routing_be_ops;
#endif
diff --git a/examples/ip_pipeline/pipeline_be.h b/examples/ip_pipeline/pipeline_be.h
index f4ff262e..b562472b 100644
--- a/examples/ip_pipeline/pipeline_be.h
+++ b/examples/ip_pipeline/pipeline_be.h
@@ -40,6 +40,9 @@
#include <rte_port_ras.h>
#include <rte_port_sched.h>
#include <rte_port_source_sink.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_port_kni.h>
+#endif
#include <rte_pipeline.h>
enum pipeline_port_in_type {
@@ -49,6 +52,7 @@ enum pipeline_port_in_type {
PIPELINE_PORT_IN_RING_READER_IPV4_FRAG,
PIPELINE_PORT_IN_RING_READER_IPV6_FRAG,
PIPELINE_PORT_IN_SCHED_READER,
+ PIPELINE_PORT_IN_KNI_READER,
PIPELINE_PORT_IN_SOURCE,
};
@@ -61,6 +65,9 @@ struct pipeline_port_in_params {
struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag;
struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag;
struct rte_port_sched_reader_params sched;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_reader_params kni;
+#endif
struct rte_port_source_params source;
} params;
uint32_t burst_size;
@@ -82,6 +89,10 @@ pipeline_port_in_params_convert(struct pipeline_port_in_params *p)
return (void *) &p->params.ring_ipv6_frag;
case PIPELINE_PORT_IN_SCHED_READER:
return (void *) &p->params.sched;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_IN_KNI_READER:
+ return (void *) &p->params.kni;
+#endif
case PIPELINE_PORT_IN_SOURCE:
return (void *) &p->params.source;
default:
@@ -105,6 +116,10 @@ pipeline_port_in_params_get_ops(struct pipeline_port_in_params *p)
return &rte_port_ring_reader_ipv6_frag_ops;
case PIPELINE_PORT_IN_SCHED_READER:
return &rte_port_sched_reader_ops;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_IN_KNI_READER:
+ return &rte_port_kni_reader_ops;
+#endif
case PIPELINE_PORT_IN_SOURCE:
return &rte_port_source_ops;
default:
@@ -122,6 +137,8 @@ enum pipeline_port_out_type {
PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS,
PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS,
PIPELINE_PORT_OUT_SCHED_WRITER,
+ PIPELINE_PORT_OUT_KNI_WRITER,
+ PIPELINE_PORT_OUT_KNI_WRITER_NODROP,
PIPELINE_PORT_OUT_SINK,
};
@@ -137,6 +154,10 @@ struct pipeline_port_out_params {
struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras;
struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras;
struct rte_port_sched_writer_params sched;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_writer_params kni;
+ struct rte_port_kni_writer_nodrop_params kni_nodrop;
+#endif
struct rte_port_sink_params sink;
} params;
};
@@ -163,6 +184,12 @@ pipeline_port_out_params_convert(struct pipeline_port_out_params *p)
return (void *) &p->params.ring_ipv6_ras;
case PIPELINE_PORT_OUT_SCHED_WRITER:
return (void *) &p->params.sched;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_OUT_KNI_WRITER:
+ return (void *) &p->params.kni;
+ case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+ return (void *) &p->params.kni_nodrop;
+#endif
case PIPELINE_PORT_OUT_SINK:
return (void *) &p->params.sink;
default:
@@ -192,6 +219,12 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
return &rte_port_ring_writer_ipv6_ras_ops;
case PIPELINE_PORT_OUT_SCHED_WRITER:
return &rte_port_sched_writer_ops;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_OUT_KNI_WRITER:
+ return &rte_port_kni_writer_ops;
+ case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+ return &rte_port_kni_writer_nodrop_ops;
+#endif
case PIPELINE_PORT_OUT_SINK:
return &rte_port_sink_ops;
default:
@@ -200,15 +233,19 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
}
#ifndef PIPELINE_NAME_SIZE
-#define PIPELINE_NAME_SIZE 32
+#define PIPELINE_NAME_SIZE 64
+#endif
+
+#ifndef PIPELINE_TYPE_SIZE
+#define PIPELINE_TYPE_SIZE 64
#endif
#ifndef PIPELINE_MAX_PORT_IN
-#define PIPELINE_MAX_PORT_IN 16
+#define PIPELINE_MAX_PORT_IN 64
#endif
#ifndef PIPELINE_MAX_PORT_OUT
-#define PIPELINE_MAX_PORT_OUT 16
+#define PIPELINE_MAX_PORT_OUT 64
#endif
#ifndef PIPELINE_MAX_TABLES
@@ -224,11 +261,12 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
#endif
#ifndef PIPELINE_MAX_ARGS
-#define PIPELINE_MAX_ARGS 32
+#define PIPELINE_MAX_ARGS 64
#endif
struct pipeline_params {
char name[PIPELINE_NAME_SIZE];
+ char type[PIPELINE_TYPE_SIZE];
struct pipeline_port_in_params port_in[PIPELINE_MAX_PORT_IN];
struct pipeline_port_out_params port_out[PIPELINE_MAX_PORT_OUT];
@@ -261,16 +299,11 @@ typedef int (*pipeline_be_op_run)(void *pipeline);
typedef int (*pipeline_be_op_timer)(void *pipeline);
-typedef int (*pipeline_be_op_track)(void *pipeline,
- uint32_t port_in,
- uint32_t *port_out);
-
struct pipeline_be_ops {
pipeline_be_op_init f_init;
pipeline_be_op_free f_free;
pipeline_be_op_run f_run;
pipeline_be_op_timer f_timer;
- pipeline_be_op_track f_track;
};
/* Pipeline specific config parse error messages */
diff --git a/examples/ip_pipeline/thread_fe.c b/examples/ip_pipeline/thread_fe.c
index 4a435f7c..6c547ca5 100644
--- a/examples/ip_pipeline/thread_fe.c
+++ b/examples/ip_pipeline/thread_fe.c
@@ -5,10 +5,6 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
-#include <cmdline_socket.h>
-#include <cmdline.h>
#include "thread.h"
#include "thread_fe.h"
@@ -85,6 +81,9 @@ app_pipeline_enable(struct app_params *app,
p_params = &app->pipeline_params[pipeline_id];
p_type = app_pipeline_type_find(app, p_params->type);
+ if (p_type == NULL)
+ return -1;
+
if (p->enabled == 1)
return -1;
@@ -259,26 +258,26 @@ cmd_pipeline_enable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_string, "t");
-cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_id_string,
NULL);
-cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_string,
"pipeline");
-cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
+static cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, enable_string,
"enable");
-cmdline_parse_inst_t cmd_pipeline_enable = {
+static cmdline_parse_inst_t cmd_pipeline_enable = {
.f = cmd_pipeline_enable_parsed,
.data = NULL,
.help_str = "Enable pipeline on specified core",
@@ -333,26 +332,26 @@ cmd_pipeline_disable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_string, "t");
-cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_id_string,
NULL);
-cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result,
pipeline_string, "pipeline");
-cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
+static cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_pipeline_disable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, disable_string,
"disable");
-cmdline_parse_inst_t cmd_pipeline_disable = {
+static cmdline_parse_inst_t cmd_pipeline_disable = {
.f = cmd_pipeline_disable_parsed,
.data = NULL,
.help_str = "Disable pipeline on specified core",
@@ -405,19 +404,19 @@ cmd_thread_headroom_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_thread_headroom_t_string =
+static cmdline_parse_token_string_t cmd_thread_headroom_t_string =
TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
t_string, "t");
-cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
+static cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
t_id_string, NULL);
-cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
+static cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
headroom_string, "headroom");
-cmdline_parse_inst_t cmd_thread_headroom = {
+static cmdline_parse_inst_t cmd_thread_headroom = {
.f = cmd_thread_headroom_parsed,
.data = NULL,
.help_str = "Display thread headroom",
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index c27e7353..ef09a2ed 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -963,7 +963,7 @@ init_mem(void)
RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
- lpm6 = rte_lpm6_create("IP_RSMBL_LPM6", socket, &lpm6_config);
+ lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
if (lpm6 == NULL) {
RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
return -1;
@@ -1040,9 +1040,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
- else if (nb_ports == 0)
+ if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
nb_lcores = rte_lcore_count();
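
Editor's note: the one-line ip_reassembly fix matters on multi-socket machines — the LPM6 table was created with the hard-coded name "IP_RSMBL_LPM6" instead of the per-socket name just formatted into buf, so only one socket could ever own a distinct table. A minimal sketch of per-socket table naming, assuming an initialized EAL; the helper and table names are illustrative.

#include <stdio.h>
#include <rte_lpm6.h>

#define MAX_LPM6_RULES 1024

/* Hypothetical helper: one LPM6 table per NUMA socket, each with a
 * unique name so rte_lpm6_create() never sees a duplicate. */
static struct rte_lpm6 *
lpm6_for_socket(int socket)
{
	char name[64];
	struct rte_lpm6_config cfg = {
		.max_rules = MAX_LPM6_RULES,
		.number_tbl8s = 1 << 16,
		.flags = 0,
	};

	snprintf(name, sizeof(name), "EXAMPLE_LPM6_%i", socket);
	return rte_lpm6_create(name, socket, &cfg);
}
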
diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
index f9b59c22..06b6db1e 100644
--- a/examples/ipsec-secgw/Makefile
+++ b/examples/ipsec-secgw/Makefile
@@ -46,15 +46,17 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_sa.o += -diag-disable=vec
endif
-
-VPATH += $(SRCDIR)/librte_ipsec
+ifeq ($(DEBUG),1)
+CFLAGS += -DIPSEC_DEBUG -fstack-protector-all -O0
+endif
#
# all source are stored in SRCS-y
#
SRCS-y += ipsec.c
SRCS-y += esp.c
-SRCS-y += sp.c
+SRCS-y += sp4.c
+SRCS-y += sp6.c
SRCS-y += sa.c
SRCS-y += rt.c
SRCS-y += ipsec-secgw.c
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 19273807..05caa77a 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -37,11 +37,11 @@
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>
#include <rte_common.h>
-#include <rte_memcpy.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>
@@ -50,15 +50,13 @@
#include "esp.h"
#include "ipip.h"
-#define IP_ESP_HDR_SZ (sizeof(struct ip) + sizeof(struct esp_hdr))
-
static inline void
random_iv_u64(uint64_t *buf, uint16_t n)
{
- unsigned left = n & 0x7;
- unsigned i;
+ uint32_t left = n & 0x7;
+ uint32_t i;
- IPSEC_ASSERT((n & 0x3) == 0);
+ RTE_ASSERT((n & 0x3) == 0);
for (i = 0; i < (n >> 3); i++)
buf[i] = rte_rand();
@@ -67,23 +65,35 @@ random_iv_u64(uint64_t *buf, uint16_t n)
*((uint32_t *)&buf[i]) = (uint32_t)lrand48();
}
-/* IPv4 Tunnel */
int
-esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- int32_t payload_len;
+ struct ip *ip4;
struct rte_crypto_sym_op *sym_cop;
+ int32_t payload_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION))
+ ip_hdr_len = ip4->ip_hl * 4;
+ else if (ip4->ip_v == IP6_VERSION)
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- payload_len = rte_pktmbuf_pkt_len(m) - IP_ESP_HDR_SZ - sa->iv_len -
- sa->digest_len;
+ payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
+ sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+ RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
return -EINVAL;
}
@@ -91,21 +101,19 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
- if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
- sym_cop->auth.data.length = sizeof(struct esp_hdr);
- else
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + payload_len;
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
@@ -117,19 +125,21 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
}
int
-esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
+ struct ip *ip4, *ip;
+ struct ip6_hdr *ip6;
uint8_t *nexthdr, *pad_len;
uint8_t *padding;
uint16_t i;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
return -1;
}
@@ -139,111 +149,187 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
padding = pad_len - *pad_len;
for (i = 0; i < *pad_len; i++) {
- if (padding[i] != i) {
- IPSEC_LOG(ERR, IPSEC_ESP, "invalid pad_len field\n");
+ if (padding[i] != i + 1) {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
return -EINVAL;
}
}
if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
- IPSEC_LOG(ERR, IPSEC_ESP,
+ RTE_LOG(ERR, IPSEC_ESP,
"failed to remove pad_len + digest\n");
return -EINVAL;
}
- return ip4ip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ ip4 = (struct ip *)rte_pktmbuf_adj(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ if (likely(ip->ip_v == IPVERSION)) {
+ memmove(ip4, ip, ip->ip_hl * 4);
+ ip4->ip_p = *nexthdr;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)ip4;
+ /* XXX No option headers supported */
+ memmove(ip6, ip, sizeof(struct ip6_hdr));
+ ip6->ip6_nxt = *nexthdr;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ } else
+ ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+
+ return 0;
}
int
-esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- uint16_t pad_payload_len, pad_len;
- struct ip *ip;
- struct esp_hdr *esp;
- int i;
- char *padding;
+ struct ip *ip4;
+ struct ip6_hdr *ip6;
+ struct esp_hdr *esp = NULL;
+ uint8_t *padding, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
+ int32_t i;
+ uint16_t pad_payload_len, pad_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip_hdr_len = 0;
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip_hdr_len = ip4->ip_hl * 4;
+ nlp = ip4->ip_p;
+ } else
+ nlp = IPPROTO_IPIP;
+ } else if (ip4->ip_v == IP6_VERSION) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ ip6 = (struct ip6_hdr *)ip4;
+ nlp = ip6->ip6_nxt;
+ } else
+ nlp = IPPROTO_IPV6;
+ } else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- /* Payload length */
- pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) + 2,
- sa->block_size);
- pad_len = pad_payload_len - rte_pktmbuf_pkt_len(m);
-
- rte_prefetch0(rte_pktmbuf_mtod_offset(m, void *,
- rte_pktmbuf_pkt_len(m)));
+ /* Padded payload length */
+ pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
+ ip_hdr_len + 2, sa->block_size);
+ pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
+
+ RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
+ sa->flags == TRANSPORT);
+
+ if (likely(sa->flags == IP4_TUNNEL))
+ ip_hdr_len = sizeof(struct ip);
+ else if (sa->flags == IP6_TUNNEL)
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else if (sa->flags != TRANSPORT) {
+ RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
+ sa->flags);
+ return -EINVAL;
+ }
/* Check maximum packet size */
- if (unlikely(IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len +
- sa->digest_len > IP_MAXPACKET)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "ipsec packet is too big\n");
+ if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+ pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
+ RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
return -EINVAL;
}
- padding = rte_pktmbuf_append(m, pad_len + sa->digest_len);
-
- IPSEC_ASSERT(padding != NULL);
-
- ip = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
- sa->src, sa->dst);
+ padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
+ if (unlikely(padding == NULL)) {
+ RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
+ return -ENOSPC;
+ }
+ rte_prefetch0(padding);
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip4 + 1);
+ break;
+ case IP6_TUNNEL:
+ ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip6 + 1);
+ break;
+ case TRANSPORT:
+ new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ memmove(new_ip, ip4, ip_hdr_len);
+ esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ ip4 = (struct ip *)new_ip;
+ ip4->ip_p = IPPROTO_ESP;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)new_ip;
+ ip6->ip6_nxt = IPPROTO_ESP;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ }
- esp = (struct esp_hdr *)(ip + 1);
- esp->spi = sa->spi;
- esp->seq = htonl(sa->seq++);
+ sa->seq++;
+ esp->spi = rte_cpu_to_be_32(sa->spi);
+ esp->seq = rte_cpu_to_be_32(sa->seq);
- IPSEC_LOG(DEBUG, IPSEC_ESP, "pktlen %u\n", rte_pktmbuf_pkt_len(m));
+ if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
+ random_iv_u64((uint64_t *)(esp + 1), sa->iv_len);
/* Fill pad_len using default sequential scheme */
for (i = 0; i < pad_len - 2; i++)
padding[i] = i + 1;
-
padding[pad_len - 2] = pad_len - 2;
- padding[pad_len - 1] = IPPROTO_IPIP;
+ padding[pad_len - 1] = nlp;
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
+ sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len +
pad_payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.length = sa->digest_len;
- if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
- random_iv_u64((uint64_t *)sym_cop->cipher.iv.data,
- sym_cop->cipher.iv.length);
-
return 0;
}
int
-esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,
+esp_outbound_post(struct rte_mbuf *m __rte_unused,
struct ipsec_sa *sa __rte_unused,
struct rte_crypto_op *cop)
{
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
}
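
Editor's note: the reworked ESP handlers drop the fixed IP_ESP_HDR_SZ constant and derive the L3 header length from the packet itself (ip_hl for IPv4, a fixed 40-byte header for IPv6 with no extension headers), then compute the crypto offsets from the layout [ IP | ESP | IV | payload | ICV ]. A small DPDK-free sketch of that arithmetic, with hypothetical names, assuming a power-of-two cipher block size as the real code does:

#include <stdint.h>

struct esp_offsets {
	uint32_t cipher_off;	/* start of the ciphertext */
	uint32_t cipher_len;	/* length of the ciphertext */
	uint32_t auth_off;	/* start of the authenticated region */
	uint32_t auth_len;	/* length of the authenticated region */
};

static inline int
esp_layout(uint32_t pkt_len, uint32_t ip_hdr_len, uint32_t esp_hdr_len,
	uint32_t iv_len, uint32_t digest_len, uint32_t block_size,
	struct esp_offsets *o)
{
	int32_t payload = (int32_t)(pkt_len - ip_hdr_len - esp_hdr_len -
		iv_len - digest_len);

	/* Same sanity check as esp_inbound(): payload must be positive
	 * and a multiple of the cipher block size. */
	if (payload <= 0 || (payload & (int32_t)(block_size - 1)))
		return -1;

	o->cipher_off = ip_hdr_len + esp_hdr_len + iv_len;
	o->cipher_len = (uint32_t)payload;
	o->auth_off = ip_hdr_len;
	o->auth_len = esp_hdr_len + iv_len + (uint32_t)payload;
	return 0;
}
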
diff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h
index 31018823..fa5cc8af 100644
--- a/examples/ipsec-secgw/esp.h
+++ b/examples/ipsec-secgw/esp.h
@@ -46,21 +46,20 @@ struct esp_hdr {
/* Integrity Check Value - ICV */
};
-/* IPv4 Tunnel */
int
-esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
#endif /* __RTE_IPSEC_XFORM_ESP_H__ */
diff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h
index 322076ce..ce25a2e2 100644
--- a/examples/ipsec-secgw/ipip.h
+++ b/examples/ipsec-secgw/ipip.h
@@ -37,67 +37,144 @@
#include <stdint.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <rte_mbuf.h>
-#define IPV6_VERSION (6)
-
-static inline struct ip *
-ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t src, uint32_t dst)
+static inline void *
+ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6,
+ struct ip_addr *src, struct ip_addr *dst)
{
- struct ip *inip, *outip;
+ struct ip *inip4, *outip4;
+ struct ip6_hdr *inip6, *outip6;
+ uint8_t ds_ecn;
- inip = rte_pktmbuf_mtod(m, struct ip*);
+ inip4 = rte_pktmbuf_mtod(m, struct ip *);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION);
- offset += sizeof(struct ip);
+ if (inip4->ip_v == IPVERSION) {
+ /* XXX This should be done by the forwarding engine instead */
+ inip4->ip_ttl -= 1;
+ ds_ecn = inip4->ip_tos;
+ } else {
+ inip6 = (struct ip6_hdr *)inip4;
+ /* XXX This should be done by the forwarding engine instead */
+ inip6->ip6_hops -= 1;
+ ds_ecn = ntohl(inip6->ip6_flow) >> 20;
+ }
+
+ if (is_ipv6) {
+ offset += sizeof(struct ip6_hdr);
+ outip6 = (struct ip6_hdr *)rte_pktmbuf_prepend(m, offset);
+
+ RTE_ASSERT(outip6 != NULL);
+
+ /* Per RFC4301 5.1.2.1 */
+ outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20);
+ outip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+
+ outip6->ip6_nxt = IPPROTO_ESP;
+ outip6->ip6_hops = IPDEFTTL;
- outip = (struct ip *)rte_pktmbuf_prepend(m, offset);
+ memcpy(&outip6->ip6_src.s6_addr, src, 16);
+ memcpy(&outip6->ip6_dst.s6_addr, dst, 16);
- IPSEC_ASSERT(outip != NULL);
+ return outip6;
+ }
+
+ offset += sizeof(struct ip);
+ outip4 = (struct ip *)rte_pktmbuf_prepend(m, offset);
+
+ RTE_ASSERT(outip4 != NULL);
/* Per RFC4301 5.1.2.1 */
- outip->ip_v = IPVERSION;
- outip->ip_hl = 5;
- outip->ip_tos = inip->ip_tos;
- outip->ip_len = htons(rte_pktmbuf_data_len(m));
+ outip4->ip_v = IPVERSION;
+ outip4->ip_hl = 5;
+ outip4->ip_tos = ds_ecn;
+ outip4->ip_len = htons(rte_pktmbuf_data_len(m));
+
+ outip4->ip_id = 0;
+ outip4->ip_off = 0;
+
+ outip4->ip_ttl = IPDEFTTL;
+ outip4->ip_p = IPPROTO_ESP;
+
+ outip4->ip_src.s_addr = src->ip4;
+ outip4->ip_dst.s_addr = dst->ip4;
- outip->ip_id = 0;
- outip->ip_off = 0;
+ return outip4;
+}
+
+static inline struct ip *
+ip4ip_outbound(struct rte_mbuf *m, uint32_t offset,
+ struct ip_addr *src, struct ip_addr *dst)
+{
+ return ipip_outbound(m, offset, 0, src, dst);
+}
- outip->ip_ttl = IPDEFTTL;
- outip->ip_p = IPPROTO_ESP;
+static inline struct ip6_hdr *
+ip6ip_outbound(struct rte_mbuf *m, uint32_t offset,
+ struct ip_addr *src, struct ip_addr *dst)
+{
+ return ipip_outbound(m, offset, 1, src, dst);
+}
- outip->ip_src.s_addr = src;
- outip->ip_dst.s_addr = dst;
+static inline void
+ip4_ecn_setup(struct ip *ip4)
+{
+ if (ip4->ip_tos & IPTOS_ECN_MASK)
+ ip4->ip_tos |= IPTOS_ECN_CE;
+}
- return outip;
+static inline void
+ip6_ecn_setup(struct ip6_hdr *ip6)
+{
+ if ((ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK)
+ ip6->ip6_flow = htonl(ntohl(ip6->ip6_flow) |
+ (IPTOS_ECN_CE << 20));
}
-static inline int
-ip4ip_inbound(struct rte_mbuf *m, uint32_t offset)
+static inline void
+ipip_inbound(struct rte_mbuf *m, uint32_t offset)
{
- struct ip *inip;
- struct ip *outip;
+ struct ip *inip4, *outip4;
+ struct ip6_hdr *inip6, *outip6;
+ uint32_t ip_len, set_ecn;
- outip = rte_pktmbuf_mtod(m, struct ip*);
+ outip4 = rte_pktmbuf_mtod(m, struct ip*);
- IPSEC_ASSERT(outip->ip_v == IPVERSION);
+ RTE_ASSERT(outip4->ip_v == IPVERSION || outip4->ip_v == IP6_VERSION);
- offset += sizeof(struct ip);
- inip = (struct ip *)rte_pktmbuf_adj(m, offset);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ if (outip4->ip_v == IPVERSION) {
+ ip_len = sizeof(struct ip);
+ set_ecn = ((outip4->ip_tos & IPTOS_ECN_CE) == IPTOS_ECN_CE);
+ } else {
+ outip6 = (struct ip6_hdr *)outip4;
+ ip_len = sizeof(struct ip6_hdr);
+ set_ecn = ntohl(outip6->ip6_flow) >> 20;
+ set_ecn = ((set_ecn & IPTOS_ECN_CE) == IPTOS_ECN_CE);
+ }
+
+ inip4 = (struct ip *)rte_pktmbuf_adj(m, offset + ip_len);
+ RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION);
/* Check packet is still bigger than IP header (inner) */
- IPSEC_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
+ RTE_ASSERT(rte_pktmbuf_pkt_len(m) > ip_len);
/* RFC4301 5.1.2.1 Note 6 */
- if ((inip->ip_tos & htons(IPTOS_ECN_ECT0 | IPTOS_ECN_ECT1)) &&
- ((outip->ip_tos & htons(IPTOS_ECN_CE)) == IPTOS_ECN_CE))
- inip->ip_tos |= htons(IPTOS_ECN_CE);
-
- return 0;
+ if (inip4->ip_v == IPVERSION) {
+ if (set_ecn)
+ ip4_ecn_setup(inip4);
+ /* XXX This should be done by the forwarding engine instead */
+ inip4->ip_ttl -= 1;
+ } else {
+ inip6 = (struct ip6_hdr *)inip4;
+ if (set_ecn)
+ ip6_ecn_setup(inip6);
+ /* XXX This should be done by the forwarding engine instead */
+ inip6->ip6_hops -= 1;
+ }
}
#endif /* __IPIP_H__ */
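
Editor's note: ipip.h now propagates ECN for both address families. For IPv4 the DS/ECN byte is ip_tos; for IPv6 the same eight bits sit in bits 20-27 of ip6_flow (4 bits version, 8 bits traffic class, 20 bits flow label), which is why the code shifts ntohl(ip6_flow) right by 20. A standalone sketch of the IPv6 accessors, assuming the usual IPTOS_ECN_* constants from netinet/ip.h:

#include <stdint.h>
#include <netinet/in.h>
#include <netinet/ip.h>		/* IPTOS_ECN_MASK, IPTOS_ECN_CE */
#include <netinet/ip6.h>

/* Return the DS/ECN byte of an IPv6 header. */
static inline uint8_t
ip6_dsecn(const struct ip6_hdr *ip6)
{
	return (uint8_t)(ntohl(ip6->ip6_flow) >> 20);
}

/* Mark Congestion Experienced on the inner header during decapsulation,
 * but only if the inner packet is ECN capable (RFC 4301, 5.1.2.1). */
static inline void
ip6_mark_ce(struct ip6_hdr *ip6)
{
	if (ip6_dsecn(ip6) & IPTOS_ECN_MASK)
		ip6->ip6_flow = htonl(ntohl(ip6->ip6_flow) |
			(IPTOS_ECN_CE << 20));
}
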
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 00ab2d84..f78743d0 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -38,6 +38,7 @@
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
@@ -65,6 +66,7 @@
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
+#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
@@ -192,7 +194,8 @@ struct lcore_conf {
struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
struct ipsec_ctx inbound;
struct ipsec_ctx outbound;
- struct rt_ctx *rt_ctx;
+ struct rt_ctx *rt4_ctx;
+ struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;
static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
@@ -230,27 +233,39 @@ struct traffic_type {
};
struct ipsec_traffic {
- struct traffic_type ipsec4;
- struct traffic_type ipv4;
+ struct traffic_type ipsec;
+ struct traffic_type ip4;
+ struct traffic_type ip6;
};
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
uint8_t *nlp;
+ struct ether_hdr *eth;
- if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
- rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
- nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
- offsetof(struct ip, ip_p));
+ eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
if (*nlp == IPPROTO_ESP)
- t->ipsec4.pkts[(t->ipsec4.num)++] = pkt;
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
else {
- t->ipv4.data[t->ipv4.num] = nlp;
- t->ipv4.pkts[(t->ipv4.num)++] = pkt;
+ t->ip4.data[t->ip4.num] = nlp;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ }
+ } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
+ if (*nlp == IPPROTO_ESP)
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ else {
+ t->ip6.data[t->ip6.num] = nlp;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
}
} else {
/* Unknown/Unsupported type, drop the packet */
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
rte_pktmbuf_free(pkt);
}
}
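
Editor's note: prepare_one_packet() no longer trusts the driver-filled pkt->packet_type; it reads the Ethernet type itself and then peeks at the L4 protocol byte (ip_p for IPv4, ip6_nxt for IPv6) to sort packets into the esp, ip4 or ip6 buckets. A condensed sketch of that decision; unlike the real code it leaves the Ethernet header in place, and all names are illustrative.

#include <stddef.h>
#include <stdint.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

enum pkt_class { PKT_IP4, PKT_IP6, PKT_ESP, PKT_OTHER };

/* Hypothetical classifier mirroring prepare_one_packet(). */
static inline enum pkt_class
classify(struct rte_mbuf *pkt)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	uint8_t *nlp;

	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
			ETHER_HDR_LEN + offsetof(struct ip, ip_p));
		return (*nlp == IPPROTO_ESP) ? PKT_ESP : PKT_IP4;
	}
	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
			ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_nxt));
		return (*nlp == IPPROTO_ESP) ? PKT_ESP : PKT_IP6;
	}
	return PKT_OTHER;	/* unknown type: caller drops the packet */
}
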
@@ -261,8 +276,9 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
{
int32_t i;
- t->ipsec4.num = 0;
- t->ipv4.num = 0;
+ t->ipsec.num = 0;
+ t->ip4.num = 0;
+ t->ip6.num = 0;
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
@@ -277,14 +293,27 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
{
- pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = ETHER_HDR_LEN;
+ struct ip *ip;
+ struct ether_hdr *ethhdr;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+ ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
- struct ether_hdr *ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt,
- ETHER_HDR_LEN);
+ if (ip->ip_v == IPVERSION) {
+ pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+ pkt->l3_len = sizeof(struct ip);
+ pkt->l2_len = ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ } else {
+ pkt->ol_flags |= PKT_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ }
- ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
sizeof(struct ether_addr));
memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
@@ -298,7 +327,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
const int32_t prefetch_offset = 2;
for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_prefetch0(pkts[i + prefetch_offset]->cacheline1);
+ rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
prepare_tx_pkt(pkts[i], port);
}
/* Process left packets */
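
Editor's note: prepare_tx_pkt() now chooses the TX offload flags by the IP version of the payload: IPv4 requests hardware IP-checksum insertion, IPv6 only needs the PKT_TX_IPV6 hint, and both record l2_len/l3_len for the PMD. A small sketch of just that bookkeeping, with a hypothetical helper name:

#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Set per-family TX offload hints after the Ethernet header is prepended. */
static inline void
set_tx_offloads(struct rte_mbuf *pkt, uint8_t ip_version)
{
	pkt->l2_len = ETHER_HDR_LEN;

	if (ip_version == IPVERSION) {
		pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
		pkt->l3_len = sizeof(struct ip);
	} else {
		pkt->ol_flags |= PKT_TX_IPV6;
		pkt->l3_len = sizeof(struct ip6_hdr);
	}
}
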
@@ -355,94 +384,133 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
}
static inline void
-process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
- struct ipsec_traffic *traffic)
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip)
{
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_in, i, j;
- uint32_t sa_idx, res;
-
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.num, MAX_PKT_BURST);
+ uint32_t i, j, res, sa_idx;
- /* SP/ACL Inbound check ipsec and ipv4 */
- for (i = 0; i < nb_pkts_in; i++) {
- idx = traffic->ipv4.num++;
- m = traffic->ipsec4.pkts[i];
- traffic->ipv4.pkts[idx] = m;
- traffic->ipv4.data[idx] = rte_pktmbuf_mtod_offset(m,
- uint8_t *, offsetof(struct ip, ip_p));
- }
+ if (ip->num == 0)
+ return;
- rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
- traffic->ipv4.data, traffic->ipv4.res,
- traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
j = 0;
- for (i = 0; i < traffic->ipv4.num - nb_pkts_in; i++) {
- m = traffic->ipv4.pkts[i];
- res = traffic->ipv4.res[i];
- if (res & ~BYPASS) {
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (res & DISCARD) {
rte_pktmbuf_free(m);
continue;
}
- traffic->ipv4.pkts[j++] = m;
- }
- /* Check return SA SPI matches pkt SPI */
- for ( ; i < traffic->ipv4.num; i++) {
- m = traffic->ipv4.pkts[i];
- sa_idx = traffic->ipv4.res[i] & PROTECT_MASK;
- if (sa_idx == 0 || !inbound_sa_check(ipsec_ctx->sa_ctx,
- m, sa_idx)) {
+ if (res & BYPASS) {
+ ip->pkts[j++] = m;
+ continue;
+ }
+ /* Check return SA SPI matches pkt SPI */
+ sa_idx = ip->res[i] & PROTECT_MASK;
+ if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
continue;
}
- traffic->ipv4.pkts[j++] = m;
+ ip->pkts[j++] = m;
}
- traffic->ipv4.num = j;
+ ip->num = j;
}
static inline void
-process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_out, i, j;
- uint32_t sa_idx, res;
+ uint16_t idx, nb_pkts_in, i;
- rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
- traffic->ipv4.data, traffic->ipv4.res,
- traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
- /* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec4.num; i++)
- rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+ /* SP/ACL Inbound check ipsec and ip4 */
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
+ uint8_t *, offsetof(struct ip, ip_p));
+ } else if (ip->ip_v == IP6_VERSION) {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
+ uint8_t *,
+ offsetof(struct ip6_hdr, ip6_nxt));
+ } else
+ rte_pktmbuf_free(m);
+ }
+
+ inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4);
+
+ inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6);
+}
+
+static inline void
+outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
+ struct traffic_type *ipsec)
+{
+ struct rte_mbuf *m;
+ uint32_t i, j, sa_idx;
+
+ if (ip->num == 0)
+ return;
- traffic->ipsec4.num = 0;
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
j = 0;
- for (i = 0; i < traffic->ipv4.num; i++) {
- m = traffic->ipv4.pkts[i];
- res = traffic->ipv4.res[i];
- sa_idx = res & PROTECT_MASK;
- if ((res == 0) || (res & DISCARD))
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ sa_idx = ip->res[i] & PROTECT_MASK;
+ if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
rte_pktmbuf_free(m);
else if (sa_idx != 0) {
- traffic->ipsec4.res[traffic->ipsec4.num] = sa_idx;
- traffic->ipsec4.pkts[traffic->ipsec4.num++] = m;
+ ipsec->res[ipsec->num] = sa_idx;
+ ipsec->pkts[ipsec->num++] = m;
} else /* BYPASS */
- traffic->ipv4.pkts[j++] = m;
+ ip->pkts[j++] = m;
}
- traffic->ipv4.num = j;
+ ip->num = j;
+}
+
+static inline void
+process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+ struct ipsec_traffic *traffic)
+{
+ struct rte_mbuf *m;
+ uint16_t idx, nb_pkts_out, i;
+
+ /* Drop any IPsec traffic from protected ports */
+ for (i = 0; i < traffic->ipsec.num; i++)
+ rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.res, traffic->ipsec4.num,
+ traffic->ipsec.num = 0;
+
+ outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
+
+ outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
MAX_PKT_BURST);
for (i = 0; i < nb_pkts_out; i++) {
- idx = traffic->ipv4.num++;
- m = traffic->ipsec4.pkts[i];
- traffic->ipv4.pkts[idx] = m;
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
}
}
@@ -450,47 +518,72 @@ static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
- uint16_t nb_pkts_in, i;
+ struct rte_mbuf *m;
+ uint32_t nb_pkts_in, i, idx;
/* Drop any IPv4 traffic from unprotected ports */
- for (i = 0; i < traffic->ipv4.num; i++)
- rte_pktmbuf_free(traffic->ipv4.pkts[i]);
+ for (i = 0; i < traffic->ip4.num; i++)
+ rte_pktmbuf_free(traffic->ip4.pkts[i]);
- traffic->ipv4.num = 0;
+ traffic->ip4.num = 0;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.num, MAX_PKT_BURST);
+ /* Drop any IPv6 traffic from unprotected ports */
+ for (i = 0; i < traffic->ip6.num; i++)
+ rte_pktmbuf_free(traffic->ip6.pkts[i]);
- for (i = 0; i < nb_pkts_in; i++)
- traffic->ipv4.pkts[i] = traffic->ipsec4.pkts[i];
+ traffic->ip6.num = 0;
- traffic->ipv4.num = nb_pkts_in;
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
+ }
}
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
- uint16_t nb_pkts_out, i;
+ struct rte_mbuf *m;
+ uint32_t nb_pkts_out, i;
+ struct ip *ip;
/* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec4.num; i++)
- rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+ for (i = 0; i < traffic->ipsec.num; i++)
+ rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- traffic->ipsec4.num = 0;
+ traffic->ipsec.num = 0;
- for (i = 0; i < traffic->ipv4.num; i++)
- traffic->ipv4.res[i] = single_sa_idx;
+ for (i = 0; i < traffic->ip4.num; i++)
+ traffic->ip4.res[i] = single_sa_idx;
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipv4.pkts,
- traffic->ipv4.res, traffic->ipv4.num,
+ for (i = 0; i < traffic->ip6.num; i++)
+ traffic->ip6.res[i] = single_sa_idx;
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
+ traffic->ip4.res, traffic->ip4.num,
MAX_PKT_BURST);
- traffic->ipv4.num = nb_pkts_out;
+ /* They all use the same SA (ip4 or ip6 tunnel) */
+ m = traffic->ipsec.pkts[i];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION)
+ traffic->ip4.num = nb_pkts_out;
+ else
+ traffic->ip6.num = nb_pkts_out;
}
static inline void
-route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
@@ -518,6 +611,35 @@ route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
static inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ int16_t hop[MAX_PKT_BURST * 2];
+ uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ uint8_t *ip6_dst;
+ uint16_t i, offset;
+
+ if (nb_pkts == 0)
+ return;
+
+ for (i = 0; i < nb_pkts; i++) {
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
+ memcpy(&dst_ip[i][0], ip6_dst, 16);
+ }
+
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
+ hop, nb_pkts);
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (hop[i] == -1) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+ send_single_packet(pkts[i], hop[i] & 0xff);
+ }
+}
+
+static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint8_t portid)
{
@@ -525,7 +647,7 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
prepare_traffic(pkts, &traffic, nb_pkts);
- if (single_sa) {
+ if (unlikely(single_sa)) {
if (UNPROTECTED_PORT(portid))
process_pkts_inbound_nosp(&qconf->inbound, &traffic);
else
@@ -537,7 +659,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route_pkts(qconf->rt_ctx, traffic.ipv4.pkts, traffic.ipv4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
static inline void
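
Editor's note: route6_pkts() mirrors the IPv4 path but with the bulk LPM6 API — copy every destination address out of its mbuf, resolve them all with one rte_lpm6_lookup_bulk_func() call, and drop whatever came back as -1. A condensed sketch of that flow; forward() is a hypothetical stand-in for send_single_packet().

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <netinet/ip6.h>
#include <rte_lpm6.h>
#include <rte_mbuf.h>

#define BURST_MAX 32

static void
lookup_and_forward(struct rte_lpm6 *lpm6, struct rte_mbuf **pkts, uint8_t n,
	void (*forward)(struct rte_mbuf *, uint8_t))
{
	uint8_t dst[BURST_MAX][16];
	int16_t hop[BURST_MAX];
	uint8_t i;

	if (n == 0 || n > BURST_MAX)
		return;

	/* Packets are assumed to start at the IPv6 header, as in the app. */
	for (i = 0; i < n; i++)
		memcpy(dst[i], rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
			offsetof(struct ip6_hdr, ip6_dst)), 16);

	rte_lpm6_lookup_bulk_func(lpm6, dst, hop, n);

	for (i = 0; i < n; i++) {
		if (hop[i] == -1) {
			rte_pktmbuf_free(pkts[i]);	/* no route */
			continue;
		}
		forward(pkts[i], (uint8_t)hop[i]);
	}
}
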
@@ -576,12 +699,15 @@ main_loop(__attribute__((unused)) void *dummy)
rxql = qconf->rx_queue_list;
socket_id = rte_lcore_to_socket_id(lcore_id);
- qconf->rt_ctx = socket_ctx[socket_id].rt_ipv4;
- qconf->inbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_in;
- qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_in;
+ qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
+ qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
+ qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
+ qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
+ qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
qconf->inbound.cdev_map = cdev_map_in;
- qconf->outbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_out;
- qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_out;
+ qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
+ qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
+ qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
qconf->outbound.cdev_map = cdev_map_out;
if (qconf->nb_rx_queue == 0) {
@@ -636,8 +762,6 @@ check_params(void)
}
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -762,7 +886,7 @@ parse_config(const char *q_arg)
FLD_LCORE,
_NUM_FLD
};
- int long int_fld[_NUM_FLD];
+ unsigned long int_fld[_NUM_FLD];
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
@@ -1286,8 +1410,6 @@ main(int32_t argc, char **argv)
unprotected_port_mask);
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_params() < 0)
rte_exit(EXIT_FAILURE, "check_params failed\n");
@@ -1313,7 +1435,9 @@ main(int32_t argc, char **argv)
sa_init(&socket_ctx[socket_id], socket_id, ep);
- sp_init(&socket_ctx[socket_id], socket_id, ep);
+ sp4_init(&socket_ctx[socket_id], socket_id, ep);
+
+ sp6_init(&socket_ctx[socket_id], socket_id, ep);
rt_init(&socket_ctx[socket_id], socket_id, ep);
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index baf30d4b..1e87d0df 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -42,11 +42,12 @@
#include <rte_hash.h>
#include "ipsec.h"
+#include "esp.h"
static inline int
create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
{
- uint32_t cdev_id_qp = 0;
+ unsigned long cdev_id_qp = 0;
int32_t ret;
struct cdev_key key = { 0 };
@@ -58,14 +59,15 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
(void **)&cdev_id_qp);
if (ret < 0) {
- IPSEC_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
+ RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u\n", key.lcore_id, key.cipher_algo,
key.auth_algo);
return -1;
}
- IPSEC_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
- "%u qp %u\n", sa->spi, ipsec_ctx->tbl[cdev_id_qp].id,
+ RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
+ "%u qp %u\n", sa->spi,
+ ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
sa->crypto_session = rte_cryptodev_sym_session_create(
@@ -79,7 +81,7 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
- int ret, i;
+ int32_t ret, i;
cqp->buf[cqp->len++] = cop;
@@ -87,7 +89,7 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
cqp->buf, cqp->len);
if (ret < cqp->len) {
- IPSEC_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ RTE_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
" enqueued %u crypto ops out of %u\n",
cqp->id, cqp->qp,
ret, cqp->len);
@@ -99,17 +101,21 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
}
}
-static inline uint16_t
-ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
- struct ipsec_sa *sas[], uint16_t nb_pkts, uint16_t max_pkts)
+static inline void
+ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
+ uint16_t nb_pkts)
{
- int ret = 0, i, j, nb_cops;
+ int32_t ret = 0, i;
struct ipsec_mbuf_metadata *priv;
- struct rte_crypto_op *cops[max_pkts];
struct ipsec_sa *sa;
- struct rte_mbuf *pkt;
for (i = 0; i < nb_pkts; i++) {
+ if (unlikely(sas[i] == NULL)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
rte_prefetch0(sas[i]);
rte_prefetch0(pkts[i]);
@@ -117,8 +123,6 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
sa = sas[i];
priv->sa = sa;
- IPSEC_ASSERT(sa != NULL);
-
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
rte_prefetch0(&priv->sym_cop);
@@ -133,17 +137,27 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
rte_crypto_op_attach_sym_session(&priv->cop,
sa->crypto_session);
- ret = sa->pre_crypto(pkts[i], sa, &priv->cop);
+ ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
- IPSEC_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
+ RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
}
+}
+
+static inline int
+ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], uint16_t max_pkts)
+{
+ int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
+ struct ipsec_mbuf_metadata *priv;
+ struct rte_crypto_op *cops[max_pkts];
+ struct ipsec_sa *sa;
+ struct rte_mbuf *pkt;
- nb_pkts = 0;
for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
struct cdev_qp *cqp;
@@ -166,9 +180,9 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
priv = get_priv(pkt);
sa = priv->sa;
- IPSEC_ASSERT(sa != NULL);
+ RTE_ASSERT(sa != NULL);
- ret = sa->post_crypto(pkt, sa, cops[j]);
+ ret = xform_func(pkt, sa, cops[j]);
if (unlikely(ret))
rte_pktmbuf_free(pkt);
else
@@ -188,7 +202,9 @@ ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);
- return ipsec_processing(ctx, pkts, sas, nb_pkts, len);
+ ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
+
+ return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
uint16_t
@@ -199,5 +215,7 @@ ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);
- return ipsec_processing(ctx, pkts, sas, nb_pkts, len);
+ ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
+
+ return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
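
Editor's note: the old ipsec_processing() is split in two — ipsec_enqueue() pushes each packet to its SA's crypto queue pair after running the per-direction transform (esp_inbound/esp_outbound), and ipsec_dequeue() later polls the queue pairs and runs the matching post-transform, so the burst handed in is not necessarily the burst handed back. The dequeue half reduces to the pattern below; the names are illustrative and error handling is trimmed to freeing failed packets.

#include <stdint.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>

/* Poll one crypto queue pair, recover each mbuf from its completed
 * operation and run the post-transform on it. The real code walks all
 * queue pairs of the lcore's ipsec_ctx. */
static uint16_t
drain_queue_pair(uint8_t dev_id, uint16_t qp_id,
	int (*post)(struct rte_mbuf *, struct rte_crypto_op *),
	struct rte_mbuf *out[], uint16_t max_out)
{
	struct rte_crypto_op *cops[max_out];
	uint16_t nb, i, n = 0;

	nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, cops, max_out);

	for (i = 0; i < nb; i++) {
		struct rte_mbuf *m = cops[i]->sym->m_src;

		if (post(m, cops[i]) == 0)
			out[n++] = m;	/* ready for routing */
		else
			rte_pktmbuf_free(m);
	}
	return n;
}
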
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index a13fdef9..0d2ee254 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -37,7 +37,6 @@
#include <stdint.h>
#include <rte_byteorder.h>
-#include <rte_ip.h>
#include <rte_crypto.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
@@ -47,30 +46,18 @@
#define MAX_PKT_BURST 32
#define MAX_QP_PER_LCORE 256
-#ifdef IPSEC_DEBUG
-#define IPSEC_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#define IPSEC_LOG RTE_LOG
-#else
-#define IPSEC_ASSERT(exp) do {} while (0)
-#define IPSEC_LOG(...) do {} while (0)
-#endif /* IPSEC_DEBUG */
-
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
#define uint32_t_to_char(ip, a, b, c, d) do {\
- *a = (unsigned char)(ip >> 24 & 0xff);\
- *b = (unsigned char)(ip >> 16 & 0xff);\
- *c = (unsigned char)(ip >> 8 & 0xff);\
- *d = (unsigned char)(ip & 0xff);\
+ *a = (uint8_t)(ip >> 24 & 0xff);\
+ *b = (uint8_t)(ip >> 16 & 0xff);\
+ *c = (uint8_t)(ip >> 8 & 0xff);\
+ *d = (uint8_t)(ip & 0xff);\
} while (0)
#define DEFAULT_MAX_CATEGORIES 1
-#define IPSEC_SA_MAX_ENTRIES (64) /* must be power of 2, max 2 power 30 */
+#define IPSEC_SA_MAX_ENTRIES (128) /* must be power of 2, max 2 power 30 */
#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1))
#define INVALID_SPI (0)
@@ -81,6 +68,8 @@ if (!(exp)) { \
#define IPSEC_XFORM_MAX 2
+#define IP6_VERSION (6)
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_cryptodev_session;
@@ -88,25 +77,36 @@ struct rte_mbuf;
struct ipsec_sa;
-typedef int (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa,
+typedef int32_t (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
+struct ip_addr {
+ union {
+ uint32_t ip4;
+ union {
+ uint64_t ip6[2];
+ uint8_t ip6_b[16];
+ };
+ };
+};
+
struct ipsec_sa {
uint32_t spi;
uint32_t cdev_id_qp;
- uint32_t src;
- uint32_t dst;
struct rte_cryptodev_sym_session *crypto_session;
- struct rte_crypto_sym_xform *xforms;
- ipsec_xform_fn pre_crypto;
- ipsec_xform_fn post_crypto;
+ uint32_t seq;
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
uint16_t digest_len;
uint16_t iv_len;
uint16_t block_size;
uint16_t flags;
- uint32_t seq;
+#define IP4_TUNNEL (1 << 0)
+#define IP6_TUNNEL (1 << 1)
+#define TRANSPORT (1 << 2)
+ struct ip_addr src;
+ struct ip_addr dst;
+ struct rte_crypto_sym_xform *xforms;
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
@@ -125,7 +125,8 @@ struct cdev_qp {
struct ipsec_ctx {
struct rte_hash *cdev_map;
- struct sp_ctx *sp_ctx;
+ struct sp_ctx *sp4_ctx;
+ struct sp_ctx *sp6_ctx;
struct sa_ctx *sa_ctx;
uint16_t nb_qps;
uint16_t last_qp;
@@ -139,11 +140,14 @@ struct cdev_key {
};
struct socket_ctx {
- struct sa_ctx *sa_ipv4_in;
- struct sa_ctx *sa_ipv4_out;
- struct sp_ctx *sp_ipv4_in;
- struct sp_ctx *sp_ipv4_out;
- struct rt_ctx *rt_ipv4;
+ struct sa_ctx *sa_in;
+ struct sa_ctx *sa_out;
+ struct sp_ctx *sp_ip4_in;
+ struct sp_ctx *sp_ip4_out;
+ struct sp_ctx *sp_ip6_in;
+ struct sp_ctx *sp_ip6_out;
+ struct rt_ctx *rt_ip4;
+ struct rt_ctx *rt_ip6;
struct rte_mempool *mbuf_pool;
};
@@ -179,12 +183,15 @@ outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
struct ipsec_sa *sa[], uint16_t nb_pkts);
void
-sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
+
+void
+sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
void
-sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
void
-rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
#endif /* __IPSEC_H__ */
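
Editor's note: with the IPv6 SAs added, the SA table grows to 128 entries; SPI2IDX() simply masks the SPI into that table, so the size must remain a power of two and the configured SPIs must land on distinct slots modulo the table size. A tiny standalone check of the mapping, using SPIs taken from the new default tables:

#include <stdint.h>
#include <stdio.h>

#define IPSEC_SA_MAX_ENTRIES (128) /* must be power of 2, max 2 power 30 */
#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1))

int
main(void)
{
	/* SPIs from the new default outbound/inbound SA tables */
	const uint32_t spis[] = { 5, 6, 10, 11, 15, 16, 25, 26, 105, 106 };
	uint32_t i;

	for (i = 0; i < sizeof(spis) / sizeof(spis[0]); i++)
		printf("spi %3u -> slot %3u\n", spis[i], SPI2IDX(spis[i]));

	return 0;
}
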
diff --git a/examples/ipsec-secgw/rt.c b/examples/ipsec-secgw/rt.c
index a6d0866a..fa5f0420 100644
--- a/examples/ipsec-secgw/rt.c
+++ b/examples/ipsec-secgw/rt.c
@@ -36,110 +36,237 @@
*/
#include <sys/types.h>
#include <rte_lpm.h>
+#include <rte_lpm6.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ipsec.h"
-#define RT_IPV4_MAX_RULES 64
+#define RT_IPV4_MAX_RULES 1024
+#define RT_IPV6_MAX_RULES 1024
-struct ipv4_route {
+struct ip4_route {
uint32_t ip;
- uint8_t depth;
- uint8_t if_out;
+ uint8_t depth;
+ uint8_t if_out;
};
-/* In the default routing table we have:
- * ep0 protected ports 0 and 1, and unprotected ports 2 and 3.
- */
-static struct ipv4_route rt_ipv4_ep0[] = {
+struct ip6_route {
+ uint8_t ip[16];
+ uint8_t depth;
+ uint8_t if_out;
+};
+
+static struct ip4_route rt_ip4_ep0[] = {
+ /* Outbound */
+ /* Tunnels */
{ IPv4(172, 16, 2, 5), 32, 0 },
- { IPv4(172, 16, 2, 6), 32, 0 },
- { IPv4(172, 16, 2, 7), 32, 1 },
- { IPv4(172, 16, 2, 8), 32, 1 },
+ { IPv4(172, 16, 2, 6), 32, 1 },
+ /* Transport */
+ { IPv4(192, 168, 175, 0), 24, 0 },
+ { IPv4(192, 168, 176, 0), 24, 1 },
+ /* Bypass */
+ { IPv4(192, 168, 240, 0), 24, 0 },
+ { IPv4(192, 168, 241, 0), 24, 1 },
+ /* Inbound */
+ /* Tunnels */
{ IPv4(192, 168, 115, 0), 24, 2 },
- { IPv4(192, 168, 116, 0), 24, 2 },
- { IPv4(192, 168, 117, 0), 24, 3 },
- { IPv4(192, 168, 118, 0), 24, 3 },
-
+ { IPv4(192, 168, 116, 0), 24, 3 },
+ { IPv4(192, 168, 65, 0), 24, 2 },
+ { IPv4(192, 168, 66, 0), 24, 3 },
+ /* Transport */
+ { IPv4(192, 168, 185, 0), 24, 2 },
+ { IPv4(192, 168, 186, 0), 24, 3 },
+ /* NULL */
{ IPv4(192, 168, 210, 0), 24, 2 },
+ { IPv4(192, 168, 211, 0), 24, 3 },
+ /* Bypass */
+ { IPv4(192, 168, 245, 0), 24, 2 },
+ { IPv4(192, 168, 246, 0), 24, 3 },
+};
- { IPv4(192, 168, 240, 0), 24, 2 },
- { IPv4(192, 168, 250, 0), 24, 0 }
+static struct ip6_route rt_ip6_ep0[] = {
+ /* Outbound */
+ /* Tunnels */
+ { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 }, 116, 0 },
+ { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 }, 116, 1 },
+ /* Transport */
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 },
+ /* Inbound */
+ /* Tunnels */
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb,
+ 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66,
+ 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ /* Transport */
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
};
-/* In the default routing table we have:
- * ep1 protected ports 0 and 1, and unprotected ports 2 and 3.
- */
-static struct ipv4_route rt_ipv4_ep1[] = {
- { IPv4(172, 16, 1, 5), 32, 2 },
- { IPv4(172, 16, 1, 6), 32, 2 },
- { IPv4(172, 16, 1, 7), 32, 3 },
- { IPv4(172, 16, 1, 8), 32, 3 },
+static struct ip4_route rt_ip4_ep1[] = {
+ /* Outbound */
+ /* Tunnels */
+ { IPv4(172, 16, 1, 5), 32, 0 },
+ { IPv4(172, 16, 1, 6), 32, 1 },
+ /* Transport */
+ { IPv4(192, 168, 185, 0), 24, 0 },
+ { IPv4(192, 168, 186, 0), 24, 1 },
+ /* Bypass */
+ { IPv4(192, 168, 245, 0), 24, 0 },
+ { IPv4(192, 168, 246, 0), 24, 1 },
- { IPv4(192, 168, 105, 0), 24, 0 },
- { IPv4(192, 168, 106, 0), 24, 0 },
- { IPv4(192, 168, 107, 0), 24, 1 },
- { IPv4(192, 168, 108, 0), 24, 1 },
+ /* Inbound */
+ /* Tunnels */
+ { IPv4(192, 168, 105, 0), 24, 2 },
+ { IPv4(192, 168, 106, 0), 24, 3 },
+ { IPv4(192, 168, 55, 0), 24, 2 },
+ { IPv4(192, 168, 56, 0), 24, 3 },
+ /* Transport */
+ { IPv4(192, 168, 175, 0), 24, 2 },
+ { IPv4(192, 168, 176, 0), 24, 3 },
+ /* NULL */
+ { IPv4(192, 168, 200, 0), 24, 2 },
+ { IPv4(192, 168, 201, 0), 24, 3 },
+ /* Bypass */
+ { IPv4(192, 168, 240, 0), 24, 2 },
+ { IPv4(192, 168, 241, 0), 24, 3 },
+};
- { IPv4(192, 168, 200, 0), 24, 0 },
+static struct ip6_route rt_ip6_ep1[] = {
+ /* Outbound */
+ /* Tunnels */
+ { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 }, 116, 0 },
+ { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 }, 116, 1 },
+ /* Transport */
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 },
- { IPv4(192, 168, 240, 0), 24, 2 },
- { IPv4(192, 168, 250, 0), 24, 0 }
+ /* Inbound */
+ /* Tunnels */
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb,
+ 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66,
+ 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ /* Transport */
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
};
void
-rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
char name[PATH_MAX];
- unsigned i;
- int ret;
+ uint32_t i;
+ int32_t ret;
struct rte_lpm *lpm;
- struct ipv4_route *rt;
+ struct rte_lpm6 *lpm6;
+ struct ip4_route *rt;
+ struct ip6_route *rt6;
char a, b, c, d;
- unsigned nb_routes;
+ uint32_t nb_routes, nb_routes6;
struct rte_lpm_config conf = { 0 };
+ struct rte_lpm6_config conf6 = { 0 };
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->rt_ipv4 != NULL)
- rte_exit(EXIT_FAILURE, "Routing Table for socket %u already "
- "initialized\n", socket_id);
+ if (ctx->rt_ip4 != NULL)
+ rte_exit(EXIT_FAILURE, "IPv4 Routing Table for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ctx->rt_ip6 != NULL)
+ rte_exit(EXIT_FAILURE, "IPv6 Routing Table for socket %u "
+ "already initialized\n", socket_id);
- printf("Creating Routing Table (RT) context with %u max routes\n",
+ printf("Creating IPv4 Routing Table (RT) context with %u max routes\n",
RT_IPV4_MAX_RULES);
if (ep == 0) {
- rt = rt_ipv4_ep0;
- nb_routes = RTE_DIM(rt_ipv4_ep0);
+ rt = rt_ip4_ep0;
+ nb_routes = RTE_DIM(rt_ip4_ep0);
+ rt6 = rt_ip6_ep0;
+ nb_routes6 = RTE_DIM(rt_ip6_ep0);
} else if (ep == 1) {
- rt = rt_ipv4_ep1;
- nb_routes = RTE_DIM(rt_ipv4_ep1);
+ rt = rt_ip4_ep1;
+ nb_routes = RTE_DIM(rt_ip4_ep1);
+ rt6 = rt_ip6_ep1;
+ nb_routes6 = RTE_DIM(rt_ip6_ep1);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. Only 0 or 1 "
"supported.\n", ep);
/* create the LPM table */
- snprintf(name, sizeof(name), "%s_%u", "rt_ipv4", socket_id);
+ snprintf(name, sizeof(name), "%s_%u", "rt_ip4", socket_id);
conf.max_rules = RT_IPV4_MAX_RULES;
conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
lpm = rte_lpm_create(name, socket_id, &conf);
if (lpm == NULL)
- rte_exit(EXIT_FAILURE, "Unable to create LPM table "
- "on socket %d\n", socket_id);
+ rte_exit(EXIT_FAILURE, "Unable to create %s LPM table "
+ "on socket %d\n", name, socket_id);
/* populate the LPM table */
for (i = 0; i < nb_routes; i++) {
ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].if_out);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Unable to add entry num %u to "
- "LPM table on socket %d\n", i, socket_id);
+ rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
+ "LPM table on socket %d\n", i, name, socket_id);
uint32_t_to_char(rt[i].ip, &a, &b, &c, &d);
printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n",
a, b, c, d, rt[i].depth, rt[i].if_out);
}
- ctx->rt_ipv4 = (struct rt_ctx *)lpm;
+ snprintf(name, sizeof(name), "%s_%u", "rt_ip6", socket_id);
+ conf6.max_rules = RT_IPV6_MAX_RULES;
+ conf6.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
+ lpm6 = rte_lpm6_create(name, socket_id, &conf6);
+ if (lpm6 == NULL)
+ rte_exit(EXIT_FAILURE, "Unable to create %s LPM table "
+ "on socket %d\n", name, socket_id);
+
+ /* populate the LPM table */
+ for (i = 0; i < nb_routes6; i++) {
+ ret = rte_lpm6_add(lpm6, rt6[i].ip, rt6[i].depth,
+ rt6[i].if_out);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
+ "LPM table on socket %d\n", i, name, socket_id);
+
+ printf("LPM6: Adding route "
+ " %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n",
+ (uint16_t)((rt6[i].ip[0] << 8) | rt6[i].ip[1]),
+ (uint16_t)((rt6[i].ip[2] << 8) | rt6[i].ip[3]),
+ (uint16_t)((rt6[i].ip[4] << 8) | rt6[i].ip[5]),
+ (uint16_t)((rt6[i].ip[6] << 8) | rt6[i].ip[7]),
+ (uint16_t)((rt6[i].ip[8] << 8) | rt6[i].ip[9]),
+ (uint16_t)((rt6[i].ip[10] << 8) | rt6[i].ip[11]),
+ (uint16_t)((rt6[i].ip[12] << 8) | rt6[i].ip[13]),
+ (uint16_t)((rt6[i].ip[14] << 8) | rt6[i].ip[15]),
+ rt6[i].depth, rt6[i].if_out);
+ }
+
+ ctx->rt_ip4 = (struct rt_ctx *)lpm;
+ ctx->rt_ip6 = (struct rt_ctx *)lpm6;
}
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index b6260ede..ab18b811 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -37,170 +37,200 @@
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ipsec.h"
#include "esp.h"
-/* SAs EP0 Outbound */
-const struct ipsec_sa sa_ep0_out[] = {
- { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
+/* SAs Outbound */
+const struct ipsec_sa sa_out[] = {
+ {
+ .spi = 5,
+ .src.ip4 = IPv4(172, 16, 1, 5),
+ .dst.ip4 = IPv4(172, 16, 2, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 6,
+ .src.ip4 = IPv4(172, 16, 1, 6),
+ .dst.ip4 = IPv4(172, 16, 2, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 10,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 11,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 15,
+ .src.ip4 = IPv4(172, 16, 1, 5),
+ .dst.ip4 = IPv4(172, 16, 2, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 16,
+ .src.ip4 = IPv4(172, 16, 1, 6),
+ .dst.ip4 = IPv4(172, 16, 2, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 25,
+ .src.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 },
+ .dst.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
+ {
+ .spi = 26,
+ .src.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 },
+ .dst.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
};
-/* SAs EP0 Inbound */
-const struct ipsec_sa sa_ep0_in[] = {
- { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
-};
-
-/* SAs EP1 Outbound */
-const struct ipsec_sa sa_ep1_out[] = {
- { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
-};
-
-/* SAs EP1 Inbound */
-const struct ipsec_sa sa_ep1_in[] = {
- { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
+/* SAs Inbound */
+const struct ipsec_sa sa_in[] = {
+ {
+ .spi = 105,
+ .src.ip4 = IPv4(172, 16, 2, 5),
+ .dst.ip4 = IPv4(172, 16, 1, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 106,
+ .src.ip4 = IPv4(172, 16, 2, 6),
+ .dst.ip4 = IPv4(172, 16, 1, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 110,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 111,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 115,
+ .src.ip4 = IPv4(172, 16, 2, 5),
+ .dst.ip4 = IPv4(172, 16, 1, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 116,
+ .src.ip4 = IPv4(172, 16, 2, 6),
+ .dst.ip4 = IPv4(172, 16, 1, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 125,
+ .src.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 },
+ .dst.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
+ {
+ .spi = 126,
+ .src.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 },
+ .dst.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
};
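
The merged tables above give every SA a unique SPI (outbound 5-26, inbound 105-126) because sa_add_rules() below places each entry at SPI2IDX(spi), so within one table the SPIs must map to distinct slots. A minimal sketch of that indexing, assuming SPI2IDX is the usual power-of-two mask from ipsec.h (the table size here is an assumption, not taken from this patch):

	/* Sketch only: table size and mask are assumed for illustration. */
	#define IPSEC_SA_MAX_ENTRIES_EXAMPLE 64
	#define SPI2IDX_EXAMPLE(spi) ((spi) & (IPSEC_SA_MAX_ENTRIES_EXAMPLE - 1))

	/* Outbound SPI 25 -> slot 25, inbound SPI 125 -> slot 61: each
	 * direction has its own sa_ctx, so only SPIs within the same
	 * direction need distinct slots. */
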
static uint8_t cipher_key[256] = "sixteenbytes key";
@@ -265,11 +295,11 @@ struct sa_ctx {
};
static struct sa_ctx *
-sa_ipv4_create(const char *name, int socket_id)
+sa_create(const char *name, int32_t socket_id)
{
char s[PATH_MAX];
struct sa_ctx *sa_ctx;
- unsigned mz_size;
+ uint32_t mz_size;
const struct rte_memzone *mz;
snprintf(s, sizeof(s), "%s_%u", name, socket_id);
@@ -294,10 +324,10 @@ sa_ipv4_create(const char *name, int socket_id)
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries, unsigned inbound)
+ uint32_t nb_entries, uint32_t inbound)
{
struct ipsec_sa *sa;
- unsigned i, idx;
+ uint32_t i, idx;
for (i = 0; i < nb_entries; i++) {
idx = SPI2IDX(entries[i].spi);
@@ -308,8 +338,14 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
return -EINVAL;
}
*sa = entries[i];
- sa->src = rte_cpu_to_be_32(sa->src);
- sa->dst = rte_cpu_to_be_32(sa->dst);
+ sa->seq = 0;
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ sa->src.ip4 = rte_cpu_to_be_32(sa->src.ip4);
+ sa->dst.ip4 = rte_cpu_to_be_32(sa->dst.ip4);
+ }
+
if (inbound) {
if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
sa_ctx->xf[idx].a = null_auth_xf;
@@ -337,65 +373,65 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries)
+ uint32_t nb_entries)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries)
+ uint32_t nb_entries)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}
void
-sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
const struct ipsec_sa *sa_out_entries, *sa_in_entries;
- unsigned nb_out_entries, nb_in_entries;
+ uint32_t nb_out_entries, nb_in_entries;
const char *name;
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->sa_ipv4_in != NULL)
+ if (ctx->sa_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
"initialized\n", socket_id);
- if (ctx->sa_ipv4_out != NULL)
+ if (ctx->sa_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
"initialized\n", socket_id);
if (ep == 0) {
- sa_out_entries = sa_ep0_out;
- nb_out_entries = RTE_DIM(sa_ep0_out);
- sa_in_entries = sa_ep0_in;
- nb_in_entries = RTE_DIM(sa_ep0_in);
+ sa_out_entries = sa_out;
+ nb_out_entries = RTE_DIM(sa_out);
+ sa_in_entries = sa_in;
+ nb_in_entries = RTE_DIM(sa_in);
} else if (ep == 1) {
- sa_out_entries = sa_ep1_out;
- nb_out_entries = RTE_DIM(sa_ep1_out);
- sa_in_entries = sa_ep1_in;
- nb_in_entries = RTE_DIM(sa_ep1_in);
+ sa_out_entries = sa_in;
+ nb_out_entries = RTE_DIM(sa_in);
+ sa_in_entries = sa_out;
+ nb_in_entries = RTE_DIM(sa_out);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
"Only 0 or 1 supported.\n", ep);
- name = "sa_ipv4_in";
- ctx->sa_ipv4_in = sa_ipv4_create(name, socket_id);
- if (ctx->sa_ipv4_in == NULL)
+ name = "sa_in";
+ ctx->sa_in = sa_create(name, socket_id);
+ if (ctx->sa_in == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
- name = "sa_ipv4_out";
- ctx->sa_ipv4_out = sa_ipv4_create(name, socket_id);
- if (ctx->sa_ipv4_out == NULL)
+ name = "sa_out";
+ ctx->sa_out = sa_create(name, socket_id);
+ if (ctx->sa_out == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
- sa_in_add_rules(ctx->sa_ipv4_in, sa_in_entries, nb_in_entries);
+ sa_in_add_rules(ctx->sa_in, sa_in_entries, nb_in_entries);
- sa_out_add_rules(ctx->sa_ipv4_out, sa_out_entries, nb_out_entries);
+ sa_out_add_rules(ctx->sa_out, sa_out_entries, nb_out_entries);
}
int
@@ -408,38 +444,66 @@ inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
}
+static inline void
+single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
+ struct ipsec_sa **sa_ret)
+{
+ struct esp_hdr *esp;
+ struct ip *ip;
+ uint32_t *src4_addr;
+ uint8_t *src6_addr;
+ struct ipsec_sa *sa;
+
+ *sa_ret = NULL;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+ if (ip->ip_v == IPVERSION)
+ esp = (struct esp_hdr *)(ip + 1);
+ else
+ esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);
+
+ if (esp->spi == INVALID_SPI)
+ return;
+
+ sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
+ if (rte_be_to_cpu_32(esp->spi) != sa->spi)
+ return;
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
+ if ((ip->ip_v == IPVERSION) &&
+ (sa->src.ip4 == *src4_addr) &&
+ (sa->dst.ip4 == *(src4_addr + 1)))
+ *sa_ret = sa;
+ break;
+ case IP6_TUNNEL:
+ src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
+ if ((ip->ip_v == IP6_VERSION) &&
+ !memcmp(&sa->src.ip6, src6_addr, 16) &&
+ !memcmp(&sa->dst.ip6, src6_addr + 16, 16))
+ *sa_ret = sa;
+ break;
+ case TRANSPORT:
+ *sa_ret = sa;
+ }
+}
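
The IPv4 tunnel case above depends on ip_src and ip_dst being adjacent in struct ip, which is why the destination is read as *(src4_addr + 1). An equivalent, more explicit form of the same check (sketch only, assuming the SA endpoints were converted to network byte order in sa_add_rules()):

	if (ip->ip_v == IPVERSION &&
			sa->src.ip4 == ip->ip_src.s_addr &&
			sa->dst.ip4 == ip->ip_dst.s_addr)
		*sa_ret = sa;
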
+
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
struct ipsec_sa *sa[], uint16_t nb_pkts)
{
- unsigned i;
- uint32_t *src, spi;
-
- for (i = 0; i < nb_pkts; i++) {
- spi = rte_pktmbuf_mtod_offset(pkts[i], struct esp_hdr *,
- sizeof(struct ip))->spi;
-
- if (spi == INVALID_SPI)
- continue;
+ uint32_t i;
- sa[i] = &sa_ctx->sa[SPI2IDX(spi)];
- if (spi != sa[i]->spi) {
- sa[i] = NULL;
- continue;
- }
-
- src = rte_pktmbuf_mtod_offset(pkts[i], uint32_t *,
- offsetof(struct ip, ip_src));
- if ((sa[i]->src != *src) || (sa[i]->dst != *(src + 1)))
- sa[i] = NULL;
- }
+ for (i = 0; i < nb_pkts; i++)
+ single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}
void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
struct ipsec_sa *sa[], uint16_t nb_pkts)
{
- unsigned i;
+ uint32_t i;
for (i = 0; i < nb_pkts; i++)
sa[i] = &sa_ctx->sa[sa_idx[i]];
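
The ep argument only decides which table plays the inbound or outbound role, so two instances of the application configured as ep 0 and ep 1 can decrypt each other's traffic. A typical per-socket call (sketch; the socket_ctx array name is assumed from the rest of the example and is not shown in this hunk):

	struct socket_ctx *sock = &socket_ctx[rte_socket_id()];	/* assumed array */

	sa_init(sock, rte_socket_id(), 0);	/* ep 0: sa_out egress, sa_in ingress */
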
diff --git a/examples/ipsec-secgw/sp.c b/examples/ipsec-secgw/sp4.c
index 4f167301..9c4b256b 100644
--- a/examples/ipsec-secgw/sp.c
+++ b/examples/ipsec-secgw/sp4.c
@@ -39,6 +39,7 @@
#include <netinet/ip.h>
#include <rte_acl.h>
+#include <rte_ip.h>
#include "ipsec.h"
@@ -71,7 +72,7 @@ enum {
RTE_ACL_IPV4_NUM
};
-struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
+struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
@@ -110,9 +111,9 @@ struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
},
};
-RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ipv4_defs));
+RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs));
-const struct acl4_rules acl4_rules_in[] = {
+const struct acl4_rules acl4_rules_out[] = {
{
.data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
/* destination IPv4 */
@@ -124,7 +125,7 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},
+ .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 106, 0),
.mask_range.u32 = 24,},
@@ -134,9 +135,9 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},
+ .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 107, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 175, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -144,9 +145,9 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},
+ .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 108, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 176, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -154,7 +155,7 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(9), .category_mask = 1, .priority = 5},
+ .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 200, 0),
.mask_range.u32 = 24,},
@@ -164,9 +165,49 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = BYPASS, .category_mask = 1, .priority = 6},
+ .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 250, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 201, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 55, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 56, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 240, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 241, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -175,9 +216,9 @@ const struct acl4_rules acl4_rules_in[] = {
}
};
-const struct acl4_rules acl4_rules_out[] = {
+const struct acl4_rules acl4_rules_in[] = {
{
- .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
+ .data = {.userdata = PROTECT(105), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 115, 0),
.mask_range.u32 = 24,},
@@ -187,7 +228,7 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},
+ .data = {.userdata = PROTECT(106), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 116, 0),
.mask_range.u32 = 24,},
@@ -197,9 +238,9 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},
+ .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 117, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 185, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -207,9 +248,9 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},
+ .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 118, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 186, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -217,7 +258,7 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(9), .category_mask = 1, .priority = 5},
+ .data = {.userdata = PROTECT(115), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 210, 0),
.mask_range.u32 = 24,},
@@ -227,9 +268,49 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = BYPASS, .category_mask = 1, .priority = 6},
+ .data = {.userdata = PROTECT(116), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 240, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 211, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 65, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 66, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 245, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 246, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -239,9 +320,9 @@ const struct acl4_rules acl4_rules_out[] = {
};
static void
-print_one_ipv4_rule(const struct acl4_rules *rule, int extra)
+print_one_ip4_rule(const struct acl4_rules *rule, int32_t extra)
{
- unsigned char a, b, c, d;
+ uint8_t a, b, c, d;
uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32,
&a, &b, &c, &d);
@@ -266,20 +347,20 @@ print_one_ipv4_rule(const struct acl4_rules *rule, int extra)
}
static inline void
-dump_ipv4_rules(const struct acl4_rules *rule, int num, int extra)
+dump_ip4_rules(const struct acl4_rules *rule, int32_t num, int32_t extra)
{
- int i;
+ int32_t i;
for (i = 0; i < num; i++, rule++) {
printf("\t%d:", i + 1);
- print_one_ipv4_rule(rule, extra);
+ print_one_ip4_rule(rule, extra);
printf("\n");
}
}
static struct rte_acl_ctx *
-acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
- unsigned rules_nb)
+acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules,
+ uint32_t rules_nb)
{
char s[PATH_MAX];
struct rte_acl_param acl_param;
@@ -294,11 +375,11 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
snprintf(s, sizeof(s), "%s_%d", name, socketid);
printf("IPv4 %s entries [%u]:\n", s, rules_nb);
- dump_ipv4_rules(rules, rules_nb, 1);
+ dump_ip4_rules(rules, rules_nb, 1);
acl_param.name = s;
acl_param.socket_id = socketid;
- acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ipv4_defs));
+ acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip4_defs));
acl_param.max_rule_num = MAX_ACL_RULE_NUM;
ctx = rte_acl_create(&acl_param);
@@ -313,8 +394,8 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
memset(&acl_build_param, 0, sizeof(acl_build_param));
acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
- acl_build_param.num_fields = RTE_DIM(ipv4_defs);
- memcpy(&acl_build_param.defs, ipv4_defs, sizeof(ipv4_defs));
+ acl_build_param.num_fields = RTE_DIM(ip4_defs);
+ memcpy(&acl_build_param.defs, ip4_defs, sizeof(ip4_defs));
if (rte_acl_build(ctx, &acl_build_param) != 0)
rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");
@@ -325,42 +406,42 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
}
void
-sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
const char *name;
const struct acl4_rules *rules_out, *rules_in;
- unsigned nb_out_rules, nb_in_rules;
+ uint32_t nb_out_rules, nb_in_rules;
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->sp_ipv4_in != NULL)
+ if (ctx->sp_ip4_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SP DB for socket %u already "
"initialized\n", socket_id);
- if (ctx->sp_ipv4_out != NULL)
+ if (ctx->sp_ip4_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already "
"initialized\n", socket_id);
if (ep == 0) {
- rules_out = acl4_rules_in;
- nb_out_rules = RTE_DIM(acl4_rules_in);
- rules_in = acl4_rules_out;
- nb_in_rules = RTE_DIM(acl4_rules_out);
- } else if (ep == 1) {
rules_out = acl4_rules_out;
nb_out_rules = RTE_DIM(acl4_rules_out);
rules_in = acl4_rules_in;
nb_in_rules = RTE_DIM(acl4_rules_in);
+ } else if (ep == 1) {
+ rules_out = acl4_rules_in;
+ nb_out_rules = RTE_DIM(acl4_rules_in);
+ rules_in = acl4_rules_out;
+ nb_in_rules = RTE_DIM(acl4_rules_out);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
"Only 0 or 1 supported.\n", ep);
- name = "sp_ipv4_in";
- ctx->sp_ipv4_in = (struct sp_ctx *)acl4_init(name, socket_id,
+ name = "sp_ip4_in";
+ ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, socket_id,
rules_in, nb_in_rules);
- name = "sp_ipv4_out";
- ctx->sp_ipv4_out = (struct sp_ctx *)acl4_init(name, socket_id,
+ name = "sp_ip4_out";
+ ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, socket_id,
rules_out, nb_out_rules);
}
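
The userdata of every ACL entry now carries the SPI of the SA protecting that flow, so PROTECT(5) pairs with outbound SPI 5 and PROTECT(105) with inbound SPI 105. A hypothetical additional outbound entry would follow the same pattern (sketch only; SPI 27 and 192.168.57.0/24 are illustrative and not defined by this patch):

	{
	.data = {.userdata = PROTECT(27), .category_mask = 1, .priority = 1},
	/* destination IPv4 */
	.field[2] = {.value.u32 = IPv4(192, 168, 57, 0),
		.mask_range.u32 = 24,},
	/* source port */
	.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
	/* destination port */
	.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
	},
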
diff --git a/examples/ipsec-secgw/sp6.c b/examples/ipsec-secgw/sp6.c
new file mode 100644
index 00000000..1dda11a4
--- /dev/null
+++ b/examples/ipsec-secgw/sp6.c
@@ -0,0 +1,448 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Security Policies
+ */
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <netinet/ip6.h>
+
+#include <rte_acl.h>
+#include <rte_ip.h>
+
+#include "ipsec.h"
+
+#define MAX_ACL_RULE_NUM 1000
+
+enum {
+ IP6_PROTO,
+ IP6_SRC0,
+ IP6_SRC1,
+ IP6_SRC2,
+ IP6_SRC3,
+ IP6_DST0,
+ IP6_DST1,
+ IP6_DST2,
+ IP6_DST3,
+ IP6_SRCP,
+ IP6_DSTP,
+ IP6_NUM
+};
+
+#define IP6_ADDR_SIZE 16
+
+struct rte_acl_field_def ip6_defs[IP6_NUM] = {
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = IP6_PROTO,
+ .input_index = IP6_PROTO,
+ .offset = 0,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC0,
+ .input_index = IP6_SRC0,
+ .offset = 2
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC1,
+ .input_index = IP6_SRC1,
+ .offset = 6
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC2,
+ .input_index = IP6_SRC2,
+ .offset = 10
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC3,
+ .input_index = IP6_SRC3,
+ .offset = 14
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST0,
+ .input_index = IP6_DST0,
+ .offset = 18
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST1,
+ .input_index = IP6_DST1,
+ .offset = 22
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST2,
+ .input_index = IP6_DST2,
+ .offset = 26
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST3,
+ .input_index = IP6_DST3,
+ .offset = 30
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = IP6_SRCP,
+ .input_index = IP6_SRCP,
+ .offset = 34
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = IP6_DSTP,
+ .input_index = IP6_SRCP,
+ .offset = 36
+ }
+};
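
With these offsets each 128-bit address is classified as four consecutive 32-bit words (IP6_SRC0..3 and IP6_DST0..3, most significant word first), so an IPv6 prefix becomes a per-word value/prefix-length pair. A sketch of a /64 destination prefix in that encoding (illustrative words only; byte-order handling is done by the classify path, which is outside this hunk):

	/* Example /64: the first two destination words carry the prefix,
	 * the remaining words are wildcarded with a zero-bit mask. */
	.field[IP6_DST0] = {.value.u32 = 0x20010db8, .mask_range.u32 = 32,},
	.field[IP6_DST1] = {.value.u32 = 0x11112222, .mask_range.u32 = 32,},
	.field[IP6_DST2] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
	.field[IP6_DST3] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
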
+
+RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs));
+
+const struct acl6_rules acl6_rules_out[] = {
+ {
+ .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ }
+};
+
+const struct acl6_rules acl6_rules_in[] = {
+ {
+ .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ }
+};
+
+static inline void
+print_one_ip6_rule(const struct acl6_rules *rule, int32_t extra)
+{
+ uint8_t a, b, c, d;
+
+ uint32_t_to_char(rule->field[IP6_SRC0].value.u32,
+ &a, &b, &c, &d);
+ printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC1].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC2].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC3].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
+ rule->field[IP6_SRC0].mask_range.u32
+ + rule->field[IP6_SRC1].mask_range.u32
+ + rule->field[IP6_SRC2].mask_range.u32
+ + rule->field[IP6_SRC3].mask_range.u32);
+
+ uint32_t_to_char(rule->field[IP6_DST0].value.u32,
+ &a, &b, &c, &d);
+ printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST1].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST2].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST3].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
+ rule->field[IP6_DST0].mask_range.u32
+ + rule->field[IP6_DST1].mask_range.u32
+ + rule->field[IP6_DST2].mask_range.u32
+ + rule->field[IP6_DST3].mask_range.u32);
+
+ printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ",
+ rule->field[IP6_SRCP].value.u16,
+ rule->field[IP6_SRCP].mask_range.u16,
+ rule->field[IP6_DSTP].value.u16,
+ rule->field[IP6_DSTP].mask_range.u16,
+ rule->field[IP6_PROTO].value.u8,
+ rule->field[IP6_PROTO].mask_range.u8);
+ if (extra)
+ printf("0x%x-0x%x-0x%x ",
+ rule->data.category_mask,
+ rule->data.priority,
+ rule->data.userdata);
+}
+
+static inline void
+dump_ip6_rules(const struct acl6_rules *rule, int32_t num, int32_t extra)
+{
+ int32_t i;
+
+ for (i = 0; i < num; i++, rule++) {
+ printf("\t%d:", i + 1);
+ print_one_ip6_rule(rule, extra);
+ printf("\n");
+ }
+}
+
+static struct rte_acl_ctx *
+acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules,
+ uint32_t rules_nb)
+{
+ char s[PATH_MAX];
+ struct rte_acl_param acl_param;
+ struct rte_acl_config acl_build_param;
+ struct rte_acl_ctx *ctx;
+
+ printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM);
+
+ memset(&acl_param, 0, sizeof(acl_param));
+
+ /* Create ACL contexts */
+ snprintf(s, sizeof(s), "%s_%d", name, socketid);
+
+ printf("IPv4 %s entries [%u]:\n", s, rules_nb);
+ dump_ip6_rules(rules, rules_nb, 1);
+
+ acl_param.name = s;
+ acl_param.socket_id = socketid;
+ acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip6_defs));
+ acl_param.max_rule_num = MAX_ACL_RULE_NUM;
+
+ ctx = rte_acl_create(&acl_param);
+ if (ctx == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to create ACL context\n");
+
+ if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules,
+ rules_nb) < 0)
+ rte_exit(EXIT_FAILURE, "add rules failed\n");
+
+ /* Perform builds */
+ memset(&acl_build_param, 0, sizeof(acl_build_param));
+
+ acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
+ acl_build_param.num_fields = RTE_DIM(ip6_defs);
+ memcpy(&acl_build_param.defs, ip6_defs, sizeof(ip6_defs));
+
+ if (rte_acl_build(ctx, &acl_build_param) != 0)
+ rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");
+
+ rte_acl_dump(ctx);
+
+ return ctx;
+}
+
+void
+sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
+{
+ const char *name;
+ const struct acl6_rules *rules_out, *rules_in;
+ uint32_t nb_out_rules, nb_in_rules;
+
+ if (ctx == NULL)
+ rte_exit(EXIT_FAILURE, "NULL context.\n");
+
+ if (ctx->sp_ip6_in != NULL)
+ rte_exit(EXIT_FAILURE, "Inbound IPv6 SP DB for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ctx->sp_ip6_out != NULL)
+ rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ep == 0) {
+ rules_out = acl6_rules_out;
+ nb_out_rules = RTE_DIM(acl6_rules_out);
+ rules_in = acl6_rules_in;
+ nb_in_rules = RTE_DIM(acl6_rules_in);
+ } else if (ep == 1) {
+ rules_out = acl6_rules_in;
+ nb_out_rules = RTE_DIM(acl6_rules_in);
+ rules_in = acl6_rules_out;
+ nb_in_rules = RTE_DIM(acl6_rules_out);
+ } else
+ rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
+ "Only 0 or 1 supported.\n", ep);
+
+ name = "sp_ip6_in";
+ ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, socket_id,
+ rules_in, nb_in_rules);
+
+ name = "sp_ip6_out";
+ ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, socket_id,
+ rules_out, nb_out_rules);
+}
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 96b41578..f013d927 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -321,7 +321,7 @@ mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
/* Construct Ethernet header. */
ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
- RTE_MBUF_ASSERT(ethdr != NULL);
+ RTE_ASSERT(ethdr != NULL);
ether_addr_copy(dest_addr, &ethdr->d_addr);
ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
@@ -353,7 +353,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
/* Remove the Ethernet header from the input packet */
iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
- RTE_MBUF_ASSERT(iphdr != NULL);
+ RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
diff --git a/examples/kni/main.c b/examples/kni/main.c
index a5297f28..f9fc61e0 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -318,8 +318,6 @@ main_loop(__rte_unused void *arg)
};
enum lcore_rxtx flag = LCORE_NONE;
- nb_ports = (uint8_t)(nb_ports < RTE_MAX_ETHPORTS ?
- nb_ports : RTE_MAX_ETHPORTS);
for (i = 0; i < nb_ports; i++) {
if (!kni_port_params_array[i])
continue;
@@ -831,7 +829,8 @@ kni_free_kni(uint8_t port_id)
return -1;
for (i = 0; i < p[port_id]->nb_kni; i++) {
- rte_kni_release(p[port_id]->kni[i]);
+ if (rte_kni_release(p[port_id]->kni[i]))
+ printf("Fail to release kni\n");
p[port_id]->kni[i] = NULL;
}
rte_eth_dev_stop(port_id);
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index d4e2d8de..8dc616d4 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -341,20 +341,25 @@ fill_supported_algorithm_tables(void)
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
+ strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
+ "AES_XCBC_MAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
+ strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
strcpy(supported_cipher_algo[i], "NOT_SUPPORTED");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CBC], "AES_CBC");
+ strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CTR], "AES_CTR");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
+ strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
}
@@ -463,8 +468,9 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
op->sym->auth.digest.length = cparams->digest_length;
- /* For SNOW3G algorithms, offset/length must be in bits */
- if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+ /* For SNOW3G/KASUMI algorithms, offset/length must be in bits */
+ if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+ cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
op->sym->auth.data.offset = ipdata_offset << 3;
op->sym->auth.data.length = data_len << 3;
} else {
@@ -485,7 +491,8 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
op->sym->cipher.iv.length = cparams->iv.length;
/* For SNOW3G algorithms, offset/length must be in bits */
- if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) {
+ if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+ cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8) {
op->sym->cipher.data.offset = ipdata_offset << 3;
if (cparams->do_hash && cparams->hash_verify)
/* Do not cipher the hash tag */
@@ -1486,6 +1493,15 @@ check_supported_size(uint16_t length, uint16_t min, uint16_t max,
{
uint16_t supp_size;
+ /* Single value */
+ if (increment == 0) {
+ if (length == min)
+ return 0;
+ else
+ return -1;
+ }
+
+ /* Range of values */
for (supp_size = min; supp_size <= max; supp_size += increment) {
if (length == supp_size)
return 0;
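
With the early return, increment == 0 now means "exactly min is supported", which is how some crypto PMDs advertise fixed key, IV and digest sizes. A hedged usage sketch (the cap variable and its min/max/increment fields stand in for a device capability range and are not taken from this patch):

	uint16_t key_len = 16;	/* length to validate, example value */

	if (check_supported_size(key_len, cap->key_size.min,
			cap->key_size.max, cap->key_size.increment) != 0)
		return -1;	/* length not supported by this device */
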
@@ -1785,9 +1801,6 @@ initialize_ports(struct l2fwd_crypto_options *options)
return -1;
}
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* Reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
diff --git a/examples/l2fwd-ivshmem/host/host.c b/examples/l2fwd-ivshmem/host/host.c
index 4bd7c41d..cd284b7d 100644
--- a/examples/l2fwd-ivshmem/host/host.c
+++ b/examples/l2fwd-ivshmem/host/host.c
@@ -677,9 +677,6 @@ int main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/*
* reserve memzone to communicate with VMs - we cannot use rte_malloc here
* because while it is technically possible, it is a very bad idea to share
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 9f3a77d2..614ea604 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -811,9 +811,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
@@ -990,7 +987,7 @@ main(int argc, char **argv)
struct rte_jobstats *job = &qconf->port_fwd_jobs[i];
portid = qconf->rx_port_list[i];
- printf("Setting forward jon for port %u\n", portid);
+ printf("Setting forward job for port %u\n", portid);
snprintf(name, RTE_DIM(name), "port %u fwd", portid);
/* Setup forward job.
diff --git a/examples/l2fwd-keepalive/Makefile b/examples/l2fwd-keepalive/Makefile
index 568edcb4..ca45a798 100644
--- a/examples/l2fwd-keepalive/Makefile
+++ b/examples/l2fwd-keepalive/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -42,9 +42,10 @@ include $(RTE_SDK)/mk/rte.vars.mk
APP = l2fwd-keepalive
# all source are stored in SRCS-y
-SRCS-y := main.c
+SRCS-y := main.c shm.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDFLAGS += -lrt
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l2fwd-keepalive/ka-agent/Makefile b/examples/l2fwd-keepalive/ka-agent/Makefile
new file mode 100644
index 00000000..fd0c38b4
--- /dev/null
+++ b/examples/l2fwd-keepalive/ka-agent/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = ka-agent
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/../
+LDFLAGS += -lrt
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l2fwd-keepalive/ka-agent/main.c b/examples/l2fwd-keepalive/ka-agent/main.c
new file mode 100644
index 00000000..be1c7f49
--- /dev/null
+++ b/examples/l2fwd-keepalive/ka-agent/main.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+
+#include <rte_keepalive.h>
+
+#include <shm.h>
+
+#define MAX_TIMEOUTS 4
+#define SEM_TIMEOUT_SECS 2
+
+static struct rte_keepalive_shm *ka_shm_create(void)
+{
+ int fd = shm_open(RTE_KEEPALIVE_SHM_NAME, O_RDWR, 0666);
+ size_t size = sizeof(struct rte_keepalive_shm);
+ struct rte_keepalive_shm *shm;
+
+ if (fd < 0)
+ printf("Failed to open %s as SHM:%s\n",
+ RTE_KEEPALIVE_SHM_NAME,
+ strerror(errno));
+ else {
+ shm = (struct rte_keepalive_shm *) mmap(
+ 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (shm == MAP_FAILED)
+ printf("Failed to mmap SHM:%s\n", strerror(errno));
+ else
+ return shm;
+ }
+
+ /* Reset to zero, as it was set to MAP_FAILED aka: (void *)-1 */
+ shm = 0;
+ return NULL;
+}
+
+int main(void)
+{
+ struct rte_keepalive_shm *shm = ka_shm_create();
+ struct timespec timeout = { .tv_nsec = 0 };
+ int idx_core;
+ int cnt_cores;
+ uint64_t last_seen_alive_time = 0;
+ uint64_t most_recent_alive_time;
+ int cnt_timeouts = 0;
+ int sem_errno;
+
+ if (shm == NULL) {
+ printf("Unable to access shared core state\n");
+ return 1;
+ }
+ while (1) {
+ most_recent_alive_time = 0;
+ for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_last_seen_times[idx_core] >
+ most_recent_alive_time)
+ most_recent_alive_time =
+ shm->core_last_seen_times[idx_core];
+
+ timeout.tv_sec = time(NULL) + SEM_TIMEOUT_SECS;
+ if (sem_timedwait(&shm->core_died, &timeout) == -1) {
+ /* Assume no core death signals and no change in any
+ * last-seen times is the keepalive monitor itself
+ * failing.
+ */
+ sem_errno = errno;
+ last_seen_alive_time = most_recent_alive_time;
+ if (sem_errno == ETIMEDOUT) {
+ if (last_seen_alive_time ==
+ most_recent_alive_time &&
+ cnt_timeouts++ >
+ MAX_TIMEOUTS) {
+ printf("No updates. Exiting..\n");
+ break;
+ }
+ } else
+ printf("sem_timedwait() error (%s)\n",
+ strerror(sem_errno));
+ continue;
+ }
+ cnt_timeouts = 0;
+
+ cnt_cores = 0;
+ for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_state[idx_core] == RTE_KA_STATE_DEAD)
+ cnt_cores++;
+ if (cnt_cores == 0) {
+ /* Can happen if core was restarted since Semaphore
+ * was sent, due to agent being offline.
+ */
+ printf("Warning: Empty dead core report\n");
+ continue;
+ }
+
+ printf("%i dead cores: ", cnt_cores);
+ for (idx_core = 0;
+ idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_state[idx_core] == RTE_KA_STATE_DEAD)
+ printf("%d, ", idx_core);
+ printf("\b\b\n");
+ }
+ if (munmap(shm, sizeof(struct rte_keepalive_shm)) != 0)
+ printf("Warning: munmap() failed\n");
+ return 0;
+}
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 8da89aa1..84a59eb6 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -72,6 +72,8 @@
#include <rte_timer.h>
#include <rte_keepalive.h>
+#include "shm.h"
+
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
#define NB_MBUF 8192
@@ -523,7 +525,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
static void
-dead_core(__attribute__((unused)) void *ptr_data, const int id_core)
+dead_core(__rte_unused void *ptr_data, const int id_core)
{
printf("Dead core %i - restarting..\n", id_core);
if (rte_eal_get_lcore_state(id_core) == FINISHED) {
@@ -534,6 +536,14 @@ dead_core(__attribute__((unused)) void *ptr_data, const int id_core)
}
}
+static void
+relay_core_state(void *ptr_data, const int id_core,
+ const enum rte_keepalive_state core_state, uint64_t last_alive)
+{
+ rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
+ id_core, core_state, last_alive);
+}
+
int
main(int argc, char **argv)
{
@@ -570,9 +580,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
@@ -725,10 +732,18 @@ main(int argc, char **argv)
rte_timer_init(&stats_timer);
if (check_period > 0) {
+ struct rte_keepalive_shm *ka_shm;
+
+ ka_shm = rte_keepalive_shm_create();
+ if (ka_shm == NULL)
+ rte_exit(EXIT_FAILURE,
+ "rte_keepalive_shm_create() failed");
rte_global_keepalive_info =
- rte_keepalive_create(&dead_core, NULL);
+ rte_keepalive_create(&dead_core, ka_shm);
if (rte_global_keepalive_info == NULL)
rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
+ rte_keepalive_register_relay_callback(rte_global_keepalive_info,
+ relay_core_state, ka_shm);
rte_timer_init(&hb_timer);
if (rte_timer_reset(&hb_timer,
(check_period * rte_get_timer_hz()) / 1000,
diff --git a/examples/l2fwd-keepalive/shm.c b/examples/l2fwd-keepalive/shm.c
new file mode 100644
index 00000000..177aa5b8
--- /dev/null
+++ b/examples/l2fwd-keepalive/shm.c
@@ -0,0 +1,131 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_keepalive.h>
+
+#include "shm.h"
+
+struct rte_keepalive_shm *rte_keepalive_shm_create(void)
+{
+ int fd;
+ int idx_core;
+ struct rte_keepalive_shm *ka_shm;
+
+ /* If any existing object is not unlinked, it makes it all too easy
+ * for clients to end up with stale shared memory blocks when
+ * restarted. Unlinking makes sure subsequent shm_open by clients
+ * will get the new block mapped below.
+ */
+ if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
+ printf("Warning: Error unlinking stale %s (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME, strerror(errno));
+
+ fd = shm_open(RTE_KEEPALIVE_SHM_NAME,
+ O_CREAT | O_TRUNC | O_RDWR, 0666);
+ if (fd < 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to open %s as SHM (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME,
+ strerror(errno));
+ else if (ftruncate(fd, sizeof(struct rte_keepalive_shm)) != 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to resize SHM (%s)\n", strerror(errno));
+ else {
+ ka_shm = (struct rte_keepalive_shm *) mmap(
+ 0, sizeof(struct rte_keepalive_shm),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (ka_shm == MAP_FAILED)
+ RTE_LOG(INFO, EAL,
+ "Failed to mmap SHM (%s)\n", strerror(errno));
+ else {
+ memset(ka_shm, 0, sizeof(struct rte_keepalive_shm));
+
+ /* Initialize the semaphores for IPC/SHM use */
+ if (sem_init(&ka_shm->core_died, 1, 0) != 0) {
+ RTE_LOG(INFO, EAL,
+ "Failed to setup SHM semaphore (%s)\n",
+ strerror(errno));
+ munmap(ka_shm,
+ sizeof(struct rte_keepalive_shm));
+ return NULL;
+ }
+
+ /* Set all cores to 'not present' */
+ for (idx_core = 0;
+ idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++) {
+ ka_shm->core_state[idx_core] =
+ RTE_KA_STATE_UNUSED;
+ ka_shm->core_last_seen_times[idx_core] = 0;
+ }
+
+ return ka_shm;
+ }
+ }
+return NULL;
+}
+
+void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
+ const int id_core, const enum rte_keepalive_state core_state,
+ __rte_unused uint64_t last_alive)
+{
+ int count;
+
+ shm->core_state[id_core] = core_state;
+ shm->core_last_seen_times[id_core] = last_alive;
+
+ if (core_state == RTE_KEEPALIVE_SHM_DEAD) {
+ /* Since core has died, also signal ka_agent.
+ *
+ * Limit number of times semaphore can be incremented, in case
+ * ka_agent is not active.
+ */
+ if (sem_getvalue(&shm->core_died, &count) == -1) {
+ RTE_LOG(INFO, EAL, "Semaphore check failed(%s)\n",
+ strerror(errno));
+ return;
+ }
+ if (count > 1)
+ return;
+
+ if (sem_post(&shm->core_died) != 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to increment semaphore (%s)\n",
+ strerror(errno));
+ }
+}
diff --git a/examples/l2fwd-keepalive/shm.h b/examples/l2fwd-keepalive/shm.h
new file mode 100644
index 00000000..25e1b61d
--- /dev/null
+++ b/examples/l2fwd-keepalive/shm.h
@@ -0,0 +1,89 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define RTE_KEEPALIVE_SHM_NAME "/dpdk_keepalive_shm_name"
+
+#define RTE_KEEPALIVE_SHM_ALIVE 1
+#define RTE_KEEPALIVE_SHM_DEAD 2
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <semaphore.h>
+#include <rte_keepalive.h>
+
+/**
+ * Keepalive SHM structure.
+ *
+ * The shared memory allocated by the primary process is this size, and holds
+ * the information laid out in this struct. A secondary process may open the
+ * SHM and read its contents.
+ */
+struct rte_keepalive_shm {
+ /** IPC semaphore. Posted when a core dies */
+ sem_t core_died;
+
+ /**
+ * Relayed status of each core.
+ */
+ enum rte_keepalive_state core_state[RTE_KEEPALIVE_MAXCORES];
+
+ /**
+ * Last-seen-alive timestamps for the cores
+ */
+ uint64_t core_last_seen_times[RTE_KEEPALIVE_MAXCORES];
+};
+
+/**
+ * Create shared host memory keepalive object.
+ * @return
+ * Pointer to SHM keepalive structure, or NULL on failure.
+ */
+struct rte_keepalive_shm *rte_keepalive_shm_create(void);
+
+/**
+ * Relays the state of the given core into the shared memory block.
+ * @param *shm
+ * Pointer to SHM keepalive structure.
+ * @param id_core
+ * ID of the core being reported.
+ * @param core_state
+ * Current keepalive state of the core.
+ * @param last_alive
+ * Last-seen-alive timestamp for the core.
+ */
+void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
+ const int id_core, const enum rte_keepalive_state core_state,
+ uint64_t last_alive);
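A monitoring agent outside the DPDK process can consume this block by opening the same SHM name, mapping the struct and waiting on the core_died semaphore; a minimal sketch under those assumptions (names and error handling illustrative only):
#include <stdio.h>
#include "shm.h"

/* Hypothetical agent: map the keepalive SHM and block until a core is
 * reported dead via the core_died semaphore. */
int main(void)
{
	int idx_core;
	struct rte_keepalive_shm *shm;
	int fd = shm_open(RTE_KEEPALIVE_SHM_NAME, O_RDWR, 0666);

	if (fd < 0)
		return 1;
	shm = mmap(NULL, sizeof(*shm), PROT_READ | PROT_WRITE,
		MAP_SHARED, fd, 0);
	close(fd);
	if (shm == MAP_FAILED)
		return 1;
	for (;;) {
		sem_wait(&shm->core_died);	/* posted when a core dies */
		for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES;
				idx_core++)
			if (shm->core_state[idx_core] == RTE_KEEPALIVE_SHM_DEAD)
				printf("core %d reported dead\n", idx_core);
	}
}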
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 1ad94887..88979216 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -80,6 +80,7 @@ static volatile bool force_quit;
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MEMPOOL_CACHE_SIZE 256
/*
* Configurable number of RX/TX ring descriptors
@@ -134,10 +135,9 @@ struct l2fwd_port_statistics {
} __rte_cache_aligned;
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
-/* A tsc-based timer responsible for triggering statistics printout */
-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+/* A tsc-based timer responsible for triggering statistics printout */
+static uint64_t timer_period = 10; /* default period is 10 seconds */
/* Print out statistics on packets dropped */
static void
@@ -274,7 +274,7 @@ l2fwd_main_loop(void)
timer_tsc += diff_tsc;
/* if timer has reached its timeout */
- if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
+ if (unlikely(timer_tsc >= timer_period)) {
/* do this only on master core */
if (lcore_id == rte_get_master_lcore()) {
@@ -381,7 +381,7 @@ l2fwd_parse_timer_period(const char *q_arg)
static int
l2fwd_parse_args(int argc, char **argv)
{
- int opt, ret;
+ int opt, ret, timer_secs;
char **argvopt;
int option_index;
char *prgname = argv[0];
@@ -417,12 +417,13 @@ l2fwd_parse_args(int argc, char **argv)
/* timer period */
case 'T':
- timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
- if (timer_period < 0) {
+ timer_secs = l2fwd_parse_timer_period(optarg);
+ if (timer_secs < 0) {
printf("invalid timer period\n");
l2fwd_usage(prgname);
return -1;
}
+ timer_period = timer_secs;
break;
/* long options */
@@ -541,9 +542,13 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+ /* convert to number of cycles */
+ timer_period *= rte_get_timer_hz();
+
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+ MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
@@ -551,9 +556,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
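The statistics period is now kept in seconds and converted to TSC cycles once, after argument parsing; the same pattern in isolation (a sketch using DPDK's cycle helpers):
#include <rte_cycles.h>

/* Sketch: seconds -> TSC cycles once at startup, then a pure cycle
 * comparison in the forwarding loop. */
static uint64_t timer_period = 10;	/* seconds, e.g. from the -T option */

static void
stats_timer_sketch(void)
{
	uint64_t prev_tsc, cur_tsc, timer_tsc = 0;

	timer_period *= rte_get_timer_hz();	/* convert to cycles */
	prev_tsc = rte_rdtsc();
	for (;;) {
		cur_tsc = rte_rdtsc();
		timer_tsc += cur_tsc - prev_tsc;
		prev_tsc = cur_tsc;
		if (timer_tsc >= timer_period) {
			/* print_stats(); */
			timer_tsc = 0;
		}
	}
}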
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 26d9f5eb..16f6110e 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -72,6 +72,9 @@
#include <rte_string_fns.h>
#include <rte_acl.h>
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define L3FWDACL_DEBUG
+#endif
#define DO_RFC_1812_CHECKS
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
@@ -1914,8 +1917,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index cb42bfb9..f746960e 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1572,10 +1572,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
-
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index 034c22a7..ca01b112 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -982,8 +982,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index fc59243d..def5a024 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -259,6 +259,8 @@ em_mask_key(void *key, xmm_t mask)
return vandq_s32(data, mask);
}
+#else
+#error No vector engine (SSE, NEON) available, check your toolchain
#endif
static inline uint8_t
diff --git a/examples/l3fwd/l3fwd_em_hlm_sse.h b/examples/l3fwd/l3fwd_em_hlm_sse.h
index 5001c724..7714a20c 100644
--- a/examples/l3fwd/l3fwd_em_hlm_sse.h
+++ b/examples/l3fwd/l3fwd_em_hlm_sse.h
@@ -81,7 +81,7 @@ em_get_dst_port_ipv4x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
+ rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ?
portid : ipv4_l3fwd_out_if[ret[0]]);
@@ -179,7 +179,7 @@ em_get_dst_port_ipv6x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
+ rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ?
portid : ipv6_l3fwd_out_if[ret[0]]);
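This appears to be just a rename at the call sites; rte_hash_lookup_bulk() takes the same hash handle, key array, count and output positions. A minimal usage sketch (hash creation and key filling omitted):
#include <rte_hash.h>

/* Sketch: bulk-look up 8 keys; a negative position means the key is not
 * in the table and the caller falls back to the input port. */
static void
bulk_lookup_sketch(const struct rte_hash *h, const void *keys[8],
	const uint8_t out_if[], uint8_t dst_port[8], uint8_t portid)
{
	int32_t pos[8];
	int i;

	rte_hash_lookup_bulk(h, keys, 8, pos);
	for (i = 0; i < 8; i++)
		dst_port[i] = (pos[i] < 0) ? portid : out_if[pos[i]];
}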
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index bf6d8856..7a79cd2c 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -868,8 +868,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 99815989..04dc3e40 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -580,9 +580,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_panic("No Ethernet port - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index 2dc8b829..2d951d93 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -442,7 +442,8 @@ reset_slave_all_ports(unsigned slaveid)
pool = rte_mempool_lookup(buf_name);
if (pool)
printf("Port %d mempool free object is %u(%u)\n", slave->port[i],
- rte_mempool_count(pool), (unsigned)NB_MBUF);
+ rte_mempool_avail_count(pool),
+ (unsigned int)NB_MBUF);
else
printf("Can't find mempool %s\n", buf_name);
@@ -979,9 +980,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* create the mbuf pool */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c
index bf1b418a..112c551f 100644
--- a/examples/netmap_compat/lib/compat_netmap.c
+++ b/examples/netmap_compat/lib/compat_netmap.c
@@ -865,6 +865,9 @@ rte_netmap_poll(struct pollfd *fds, nfds_t nfds, int timeout)
uint32_t i, idx, port;
uint32_t want_rx, want_tx;
+ if (timeout > 0)
+ return -1;
+
ret = 0;
do {
for (i = 0; i < nfds; i++) {
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 15bb900c..3c88b86e 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -55,17 +55,6 @@
#define RING_SIZE 16384
-/* uncomment below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1
@@ -240,7 +229,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
/* free the mbufs which failed from transmit */
app_stats.tx.ro_tx_failed_pkts += count;
- LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
pktmbuf_free_bulk(unsent, count);
}
@@ -421,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
pkts, MAX_PKTS_BURST);
if (nb_rx_pkts == 0) {
- LOG_DEBUG(REORDERAPP,
+ RTE_LOG(DEBUG, REORDERAPP,
"%s():Received zero packets\n", __func__);
continue;
}
@@ -533,7 +522,8 @@ send_thread(struct send_thread_args *args)
if (ret == -1 && rte_errno == ERANGE) {
/* Too early pkts should be transmitted out directly */
- LOG_DEBUG(REORDERAPP, "%s():Cannot reorder early packet "
+ RTE_LOG(DEBUG, REORDERAPP,
+ "%s():Cannot reorder early packet "
"direct enqueuing to TX\n", __func__);
outp = mbufs[i]->port;
if ((portmask & (1 << outp)) == 0) {
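With the local LOG_DEBUG wrapper gone, debug messages go through RTE_LOG() directly under the application's user log type, so they are filtered by the normal DPDK log-level machinery; a minimal sketch of the pattern:
#include <rte_log.h>

#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1

/* Sketch: DEBUG output through the standard DPDK logging path. */
static void
log_sketch(uint16_t nb_rx)
{
	if (nb_rx == 0)
		RTE_LOG(DEBUG, REORDERAPP, "%s(): received zero packets\n",
			__func__);
}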
diff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c
index 8fbff737..062275a4 100644
--- a/examples/performance-thread/common/lthread.c
+++ b/examples/performance-thread/common/lthread.c
@@ -143,7 +143,7 @@ struct lthread_stack *_stack_alloc(void)
struct lthread_stack *s;
s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
- LTHREAD_ASSERT(s != NULL);
+ RTE_ASSERT(s != NULL);
s->root_sched = THIS_SCHED;
s->stack_size = LTHREAD_MAX_STACK_SIZE;
diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h
index c8357f4a..b858b55e 100644
--- a/examples/performance-thread/common/lthread_int.h
+++ b/examples/performance-thread/common/lthread_int.h
@@ -197,16 +197,4 @@ struct lthread {
uint64_t diag_ref; /* ref to user diag data */
} __rte_cache_aligned;
-/*
- * Assert
- */
-#if LTHREAD_DIAG
-#define LTHREAD_ASSERT(expr) do { \
- if (!(expr)) \
- rte_panic("line%d\tassert \"" #expr "\" failed\n", __LINE__);\
-} while (0)
-#else
-#define LTHREAD_ASSERT(expr) do {} while (0)
-#endif
-
#endif /* LTHREAD_INT_H */
diff --git a/examples/performance-thread/common/lthread_mutex.c b/examples/performance-thread/common/lthread_mutex.c
index af8b82d2..c1bc6271 100644
--- a/examples/performance-thread/common/lthread_mutex.c
+++ b/examples/performance-thread/common/lthread_mutex.c
@@ -170,7 +170,6 @@ int lthread_mutex_lock(struct lthread_mutex *m)
_suspend();
/* resumed, must loop and compete for the lock again */
}
- LTHREAD_ASSERT(0);
return 0;
}
@@ -231,7 +230,7 @@ int lthread_mutex_unlock(struct lthread_mutex *m)
if (unblocked != NULL) {
rte_atomic64_dec(&m->count);
DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
- LTHREAD_ASSERT(unblocked->sched != NULL);
+ RTE_ASSERT(unblocked->sched != NULL);
_ready_queue_insert((struct lthread_sched *)
unblocked->sched, unblocked);
break;
diff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h
index a5f32515..27680eab 100644
--- a/examples/performance-thread/common/lthread_pool.h
+++ b/examples/performance-thread/common/lthread_pool.h
@@ -138,14 +138,14 @@ _qnode_pool_create(const char *name, int prealloc_size) {
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p);
+ RTE_ASSERT(p);
p->stub = rte_malloc_socket(NULL,
sizeof(struct qnode),
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p->stub);
+ RTE_ASSERT(p->stub);
if (name != NULL)
strncpy(p->name, name, LT_MAX_NAME_SIZE);
diff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h
index 0c395167..2c55fcec 100644
--- a/examples/performance-thread/common/lthread_queue.h
+++ b/examples/performance-thread/common/lthread_queue.h
@@ -129,7 +129,7 @@ _lthread_queue_create(const char *name)
/* allocated stub node */
stub = _qnode_alloc();
- LTHREAD_ASSERT(stub);
+ RTE_ASSERT(stub);
if (name != NULL)
strncpy(new_queue->name, name, sizeof(new_queue->name));
diff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c
index 7c40bc05..c64c21ff 100644
--- a/examples/performance-thread/common/lthread_sched.c
+++ b/examples/performance-thread/common/lthread_sched.c
@@ -268,7 +268,7 @@ struct lthread_sched *_lthread_sched_create(size_t stack_size)
struct lthread_sched *new_sched;
unsigned lcoreid = rte_lcore_id();
- LTHREAD_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
+ RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
if (stack_size == 0)
stack_size = LTHREAD_MAX_STACK_SIZE;
diff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c
index 43cda4ff..6876f831 100644
--- a/examples/performance-thread/common/lthread_tls.c
+++ b/examples/performance-thread/common/lthread_tls.c
@@ -94,7 +94,7 @@ void _lthread_key_pool_init(void)
pool = rte_ring_create(name,
LTHREAD_MAX_KEYS, 0, 0);
- LTHREAD_ASSERT(pool);
+ RTE_ASSERT(pool);
int i;
@@ -240,7 +240,7 @@ void _lthread_tls_alloc(struct lthread *lt)
tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);
- LTHREAD_ASSERT(tls != NULL);
+ RTE_ASSERT(tls != NULL);
tls->root_sched = (THIS_SCHED);
lt->tls = tls;
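LTHREAD_ASSERT is dropped in favour of the EAL-provided RTE_ASSERT; a short sketch (it is assumed here that RTE_ASSERT only panics when RTE_ENABLE_ASSERT is defined at build time and compiles away otherwise):
#include <rte_debug.h>

/* Sketch: EAL assertion; panics with file/line info on failure when
 * assertions are enabled at build time (assumption noted above). */
static void
assert_sketch(const void *obj)
{
	RTE_ASSERT(obj != NULL);
}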
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 15c0a4de..fdc90b28 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -996,7 +996,7 @@ simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
+ rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
&key_array[0], 8, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
@@ -1150,7 +1150,7 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
+ rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
&key_array[0], 4, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
@@ -1307,7 +1307,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
* to BAD_PORT value.
*/
static inline __attribute__((always_inline)) void
-rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint32_t *dp, uint32_t ptype)
+rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
{
uint8_t ihl;
@@ -1326,7 +1326,7 @@ rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint32_t *dp, uint32_t ptype)
}
#else
-#define rfc1812_process(mb, dp) do { } while (0)
+#define rfc1812_process(mb, dp, ptype) do { } while (0)
#endif /* DO_RFC_1812_CHECKS */
#endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
@@ -1343,28 +1343,27 @@ get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
struct ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
- if (rte_lpm_lookup(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
- dst_ipv4, &next_hop_ipv4) != 0) {
- next_hop_ipv4 = portid;
- return next_hop_ipv4;
- }
+ return (uint16_t) ((rte_lpm_lookup(
+ RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct, dst_ipv4,
+ &next_hop_ipv4) == 0) ? next_hop_ipv4 : portid);
+
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+
eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
- if (rte_lpm6_lookup(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) != 0) {
- next_hop_ipv6 = portid;
- return next_hop_ipv6;
- }
- } else {
- next_hop_ipv4 = portid;
- return next_hop_ipv4;
+
+ return (uint16_t) ((rte_lpm6_lookup(
+ RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
+ ipv6_hdr->dst_addr, &next_hop_ipv6) == 0) ? next_hop_ipv6 :
+ portid);
+
}
+ return portid;
}
static inline void
-process_packet(struct rte_mbuf *pkt, uint32_t *dst_port, uint8_t portid)
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint8_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
@@ -1431,9 +1430,9 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
static inline void
processx4_step2(__m128i dip,
uint32_t ipv4_flag,
- uint32_t portid,
+ uint8_t portid,
struct rte_mbuf *pkt[FWDSTEP],
- uint32_t dprt[FWDSTEP])
+ uint16_t dprt[FWDSTEP])
{
rte_xmm_t dst;
const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
@@ -1445,7 +1444,11 @@ processx4_step2(__m128i dip,
/* if all 4 packets are IPV4. */
if (likely(ipv4_flag)) {
rte_lpm_lookupx4(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct, dip,
- dprt, portid);
+ dst.u32, portid);
+
+ /* get rid of unused upper 16 bit for each dport. */
+ dst.x = _mm_packs_epi32(dst.x, dst.x);
+ *(uint64_t *)dprt = dst.u64[0];
} else {
dst.x = dip;
dprt[0] = get_dst_port(pkt[0], dst.u32[0], portid);
@@ -1460,7 +1463,7 @@ processx4_step2(__m128i dip,
* Perform RFC1812 checks and updates for IPV4 packets.
*/
static inline void
-processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint32_t dst_port[FWDSTEP])
+processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
{
__m128i te[FWDSTEP];
__m128i ve[FWDSTEP];
@@ -1658,9 +1661,9 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
/* if dest port value has changed. */
if (v != GRPMSK) {
- lp = pnum->u16 + gptbl[v].idx;
- lp[0] = 1;
pnum->u64 = gptbl[v].pnum;
+ pnum->u16[FWDSTEP] = 1;
+ lp = pnum->u16 + gptbl[v].idx;
}
return lp;
@@ -1679,7 +1682,7 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
int32_t k;
uint16_t dlp;
uint16_t *lp;
- uint32_t dst_port[MAX_PKT_BURST];
+ uint16_t dst_port[MAX_PKT_BURST];
__m128i dip[MAX_PKT_BURST / FWDSTEP];
uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
uint16_t pnum[MAX_PKT_BURST + 1];
@@ -3482,8 +3485,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_rx_rings failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
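Because the destination-port arrays shrink to 16-bit entries, the four 32-bit LPM results are saturating-packed into 16-bit lanes before the low 64 bits are stored as four ports; the packing step in isolation (a sketch assuming SSE2 and the rte_xmm_t union from rte_vect.h):
#include <emmintrin.h>
#include <rte_vect.h>

/* Sketch: pack four 32-bit next hops down to four uint16_t ports. */
static void
pack_dports_sketch(rte_xmm_t dst, uint16_t dprt[4])
{
	dst.x = _mm_packs_epi32(dst.x, dst.x);
	*(uint64_t *)dprt = dst.u64[0];
}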
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index b968b001..15656155 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -133,14 +133,20 @@ struct rte_meter_trtcm_params app_trtcm_params[] = {
FLOW_METER app_flows[APP_FLOWS_MAX];
-static void
+static int
app_configure_flow_table(void)
{
uint32_t i, j;
+ int ret;
- for (i = 0, j = 0; i < APP_FLOWS_MAX; i ++, j = (j + 1) % RTE_DIM(PARAMS)){
- FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
+ for (i = 0, j = 0; i < APP_FLOWS_MAX;
+ i ++, j = (j + 1) % RTE_DIM(PARAMS)) {
+ ret = FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static inline void
@@ -381,7 +387,9 @@ main(int argc, char **argv)
rte_eth_promiscuous_enable(port_tx);
/* App configuration */
- app_configure_flow_table();
+ ret = app_configure_flow_table();
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Failed to configure flow table\n");
/* Launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
diff --git a/examples/qos_meter/main.h b/examples/qos_meter/main.h
index 530bf69c..54867dcf 100644
--- a/examples/qos_meter/main.h
+++ b/examples/qos_meter/main.h
@@ -51,7 +51,7 @@ enum policer_action policer_table[e_RTE_METER_COLORS][e_RTE_METER_COLORS] =
#if APP_MODE == APP_MODE_FWD
#define FUNC_METER(a,b,c,d) color, flow_id=flow_id, pkt_len=pkt_len, time=time
-#define FUNC_CONFIG(a,b)
+#define FUNC_CONFIG(a, b) 0
#define PARAMS app_srtcm_params
#define FLOW_METER int
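FUNC_CONFIG now always evaluates to a value (0 in forwarding mode; presumably the meter-config return code in the metering modes), so app_configure_flow_table() can stop on the first failure. A sketch of the checked per-flow configuration, with illustrative srTCM parameters:
#include <rte_meter.h>

/* Sketch: configure one srTCM meter per flow and propagate errors.
 * Parameter values are illustrative only. */
static int
configure_meters_sketch(struct rte_meter_srtcm *flows, uint32_t n_flows)
{
	struct rte_meter_srtcm_params params = {
		.cir = 46 * 1000000, .cbs = 2048, .ebs = 2048
	};
	uint32_t i;
	int ret;

	for (i = 0; i < n_flows; i++) {
		ret = rte_meter_srtcm_config(&flows[i], &params);
		if (ret)
			return ret;
	}
	return 0;
}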
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 3e7fd087..476a0ee1 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -123,7 +123,7 @@ app_eal_core_mask(void)
uint64_t cm = 0;
struct rte_config *cfg = rte_eal_get_configuration();
- for (i = 0; i < RTE_MAX_LCORE; i++) {
+ for (i = 0; i < APP_MAX_LCORE; i++) {
if (cfg->lcore_role[i] == ROLE_RTE)
cm |= (1ULL << i);
}
@@ -142,7 +142,7 @@ app_cpu_core_count(void)
char path[PATH_MAX];
uint32_t ncores = 0;
- for(i = 0; i < RTE_MAX_LCORE; i++) {
+ for (i = 0; i < APP_MAX_LCORE; i++) {
len = snprintf(path, sizeof(path), SYS_CPU_DIR, i);
if (len <= 0 || (unsigned)len >= sizeof(path))
continue;
@@ -162,7 +162,7 @@ static int
app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32_t *opt_vals)
{
char *string;
- uint32_t i, n_tokens;
+ int i, n_tokens;
char *tokens[MAX_OPT_VALUES];
if (conf_str == NULL || opt_vals == NULL || n_vals == 0 || n_vals > MAX_OPT_VALUES)
@@ -175,9 +175,11 @@ app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32
n_tokens = rte_strsplit(string, strnlen(string, 32), tokens, n_vals, separator);
- for(i = 0; i < n_tokens; i++) {
+ if (n_tokens > MAX_OPT_VALUES)
+ return -1;
+
+ for (i = 0; i < n_tokens; i++)
opt_vals[i] = (uint32_t)atol(tokens[i]);
- }
free(string);
@@ -270,7 +272,7 @@ app_parse_flow_conf(const char *conf_str)
}
if (pconf->tx_port >= RTE_MAX_ETHPORTS) {
RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu8" index\n",
- nb_pfc, pconf->rx_port);
+ nb_pfc, pconf->tx_port);
return -1;
}
diff --git a/examples/qos_sched/main.c b/examples/qos_sched/main.c
index e16b164d..e10cfd44 100644
--- a/examples/qos_sched/main.c
+++ b/examples/qos_sched/main.c
@@ -201,8 +201,6 @@ app_stat(void)
stats.oerrors - tx_stats[i].oerrors);
memcpy(&tx_stats[i], &stats, sizeof(stats));
- //printf("MP = %d\n", rte_mempool_count(conf->app_pktmbuf_pool));
-
#if APP_COLLECT_STAT
printf("-------+------------+------------+\n");
printf(" | received | dropped |\n");
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index 82aa0fae..c7490c61 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -68,7 +68,10 @@ extern "C" {
#define BURST_TX_DRAIN_US 100
-#define MAX_DATA_STREAMS (RTE_MAX_LCORE/2)
+#ifndef APP_MAX_LCORE
+#define APP_MAX_LCORE 64
+#endif
+#define MAX_DATA_STREAMS (APP_MAX_LCORE/2)
#define MAX_SCHED_SUBPORTS 8
#define MAX_SCHED_PIPES 4096
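Bounding those loops at APP_MAX_LCORE (64) rather than RTE_MAX_LCORE matters because the core mask is built in a uint64_t and 1ULL << i is undefined for i >= 64; the bounded mask construction in isolation:
#include <stdint.h>
#include <rte_eal.h>
#include <rte_lcore.h>

#ifndef APP_MAX_LCORE
#define APP_MAX_LCORE 64
#endif

/* Sketch: only the first 64 lcores fit in a 64-bit mask. */
static uint64_t
core_mask_sketch(void)
{
	uint64_t cm = 0;
	uint32_t i;
	struct rte_config *cfg = rte_eal_get_configuration();

	for (i = 0; i < APP_MAX_LCORE; i++)
		if (cfg->lcore_role[i] == ROLE_RTE)
			cm |= (1ULL << i);
	return cm;
}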
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index afc13665..c2087218 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -170,5 +170,5 @@ setup_shared_variables(void)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
}
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index eb2f618a..4961089b 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -68,7 +68,7 @@ setup_shared_variables(void)
rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");
quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
}
int main(int argc, char **argv)
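The low_watermark fix is plain pointer arithmetic: adding 1 to an unsigned int pointer already advances by sizeof(unsigned int) bytes, whereas the old + sizeof(int) skipped four elements into the memzone. The intended two-element layout, sketched:
#include <stdio.h>

/* Sketch: the memzone holds two adjacent unsigned ints, the quota followed
 * by the low watermark; element arithmetic (+ 1) selects the second one. */
static void
layout_sketch(void *memzone_addr)
{
	unsigned int *quota = (unsigned int *)memzone_addr;
	unsigned int *low_watermark = (unsigned int *)memzone_addr + 1;

	printf("quota=%p low_watermark=%p (offset %zu bytes)\n",
		(void *)quota, (void *)low_watermark, sizeof(unsigned int));
}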
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index f97d552a..622f248a 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -566,10 +566,9 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
struct rte_mbuf **m_table;
unsigned len, ret = 0;
const uint16_t lcore_id = rte_lcore_id();
- struct virtio_net *dev = vdev->dev;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n",
- dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
+ vdev->vid);
/* Add packet to the port tx queue */
tx_q = &lcore_tx_queue[lcore_id];
@@ -578,8 +577,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
tx_q->m_table[len] = m;
len++;
if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
+ dev_statistics[vdev->vid].tx_total++;
+ dev_statistics[vdev->vid].tx++;
}
if (unlikely(len == MAX_PKT_BURST)) {
@@ -614,7 +613,6 @@ static int
switch_worker(__rte_unused void *arg)
{
struct rte_mempool *mbuf_pool = arg;
- struct virtio_net *dev = NULL;
struct vhost_dev *vdev = NULL;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct virtio_net_data_ll *dev_ll;
@@ -651,7 +649,7 @@ switch_worker(__rte_unused void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after "
+ RTE_LOG(DEBUG, VHOST_DATA, "TX queue drained after "
"timeout with burst size %u\n",
tx_q->len);
ret = overlay_options.tx_handle(ports[0],
@@ -688,7 +686,6 @@ switch_worker(__rte_unused void *arg)
while (dev_ll != NULL) {
vdev = dev_ll->vdev;
- dev = vdev->dev;
if (unlikely(vdev->remove)) {
dev_ll = dev_ll->next;
@@ -709,22 +706,22 @@ switch_worker(__rte_unused void *arg)
* must be less than virtio queue size
*/
if (enable_retry && unlikely(rx_count >
- rte_vring_available_entries(dev, VIRTIO_RXQ))) {
+ rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) {
for (retry = 0; retry < burst_rx_retry_num;
retry++) {
rte_delay_us(burst_rx_delay_time);
- if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
+ if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))
break;
}
}
- ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
+ ret_count = overlay_options.rx_handle(vdev->vid, pkts_burst, rx_count);
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_total_atomic,
+ &dev_statistics[vdev->vid].rx_total_atomic,
rx_count);
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_atomic, ret_count);
+ &dev_statistics[vdev->vid].rx_atomic, ret_count);
}
while (likely(rx_count)) {
rx_count--;
@@ -736,7 +733,7 @@ switch_worker(__rte_unused void *arg)
if (likely(!vdev->remove)) {
/* Handle guest TX*/
- tx_count = rte_vhost_dequeue_burst(dev,
+ tx_count = rte_vhost_dequeue_burst(vdev->vid,
VIRTIO_TXQ, mbuf_pool,
pkts_burst, MAX_PKT_BURST);
/* If this is the first received packet we need to learn the MAC */
@@ -908,23 +905,27 @@ init_data_ll(void)
/**
* Remove a device from the specific data core linked list and
* from the main linked list. Synchronization occurs through the use
- * of the lcore dev_removal_flag. Device is made volatile here
- * to avoid re-ordering of dev->remove=1 which can cause an infinite
- * loop in the rte_pause loop.
+ * of the lcore dev_removal_flag.
*/
static void
-destroy_device(volatile struct virtio_net *dev)
+destroy_device(int vid)
{
struct virtio_net_data_ll *ll_lcore_dev_cur;
struct virtio_net_data_ll *ll_main_dev_cur;
struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
struct virtio_net_data_ll *ll_main_dev_last = NULL;
- struct vhost_dev *vdev;
+ struct vhost_dev *vdev = NULL;
int lcore;
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
- vdev = (struct vhost_dev *)dev->priv;
+ ll_main_dev_cur = ll_root_used;
+ while (ll_main_dev_cur != NULL) {
+ if (ll_main_dev_cur->vdev->vid == vid) {
+ vdev = ll_main_dev_cur->vdev;
+ break;
+ }
+ }
+ if (!vdev)
+ return;
/* set the remove flag. */
vdev->remove = 1;
@@ -944,8 +945,7 @@ destroy_device(volatile struct virtio_net *dev)
if (ll_lcore_dev_cur == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find the dev to be destroy.\n",
- dev->device_fh);
+ "(%d) Failed to find the dev to be destroyed.\n", vid);
return;
}
@@ -992,8 +992,8 @@ destroy_device(volatile struct virtio_net *dev)
/* Decrement number of device on the lcore. */
lcore_info[vdev->coreid].lcore_ll->device_num--;
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed "
- "from data core\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
+ "from data core\n", vid);
rte_free(vdev);
@@ -1004,7 +1004,7 @@ destroy_device(volatile struct virtio_net *dev)
* to the main linked list and the allocated to a specific data core.
*/
static int
-new_device(struct virtio_net *dev)
+new_device(int vid)
{
struct virtio_net_data_ll *ll_dev;
int lcore, core_add = 0;
@@ -1014,18 +1014,16 @@ new_device(struct virtio_net *dev)
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
- dev->device_fh);
+ "(%d) Couldn't allocate memory for vhost dev\n", vid);
return -1;
}
- vdev->dev = dev;
- dev->priv = vdev;
+ vdev->vid = vid;
/* Add device to main ll */
ll_dev = get_data_ll_free_entry(&ll_root_free);
if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in"
+ RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
" linked list Device limit of %d devices per core"
- " has been reached\n", dev->device_fh, nb_devices);
+ " has been reached\n", vid, nb_devices);
if (vdev->regions_hpa)
rte_free(vdev->regions_hpa);
rte_free(vdev);
@@ -1033,7 +1031,7 @@ new_device(struct virtio_net *dev)
}
ll_dev->vdev = vdev;
add_data_ll_entry(&ll_root_used, ll_dev);
- vdev->rx_q = dev->device_fh;
+ vdev->rx_q = vid;
/* reset ready flag */
vdev->ready = DEVICE_MAC_LEARNING;
@@ -1050,10 +1048,10 @@ new_device(struct virtio_net *dev)
ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") Failed to add device to data core\n",
- dev->device_fh);
+ "(%d) Failed to add device to data core\n",
+ vid);
vdev->ready = DEVICE_SAFE_REMOVE;
- destroy_device(dev);
+ destroy_device(vid);
rte_free(vdev->regions_hpa);
rte_free(vdev);
return -1;
@@ -1065,17 +1063,16 @@ new_device(struct virtio_net *dev)
ll_dev);
/* Initialize device stats */
- memset(&dev_statistics[dev->device_fh], 0,
+ memset(&dev_statistics[vid], 0,
sizeof(struct device_statistics));
/* Disable notifications. */
- rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
- rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
lcore_info[vdev->coreid].lcore_ll->device_num++;
- dev->flags |= VIRTIO_DEV_RUNNING;
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n",
- dev->device_fh, vdev->coreid);
+ RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
+ vid, vdev->coreid);
return 0;
}
@@ -1099,7 +1096,7 @@ print_stats(void)
struct virtio_net_data_ll *dev_ll;
uint64_t tx_dropped, rx_dropped;
uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
- uint32_t device_fh;
+ int vid;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
@@ -1113,22 +1110,22 @@ print_stats(void)
dev_ll = ll_root_used;
while (dev_ll != NULL) {
- device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
- tx_total = dev_statistics[device_fh].tx_total;
- tx = dev_statistics[device_fh].tx;
+ vid = dev_ll->vdev->vid;
+ tx_total = dev_statistics[vid].tx_total;
+ tx = dev_statistics[vid].tx;
tx_dropped = tx_total - tx;
rx_total = rte_atomic64_read(
- &dev_statistics[device_fh].rx_total_atomic);
+ &dev_statistics[vid].rx_total_atomic);
rx = rte_atomic64_read(
- &dev_statistics[device_fh].rx_atomic);
+ &dev_statistics[vid].rx_atomic);
rx_dropped = rx_total - rx;
rx_ip_csum = rte_atomic64_read(
- &dev_statistics[device_fh].rx_bad_ip_csum);
+ &dev_statistics[vid].rx_bad_ip_csum);
rx_l4_csum = rte_atomic64_read(
- &dev_statistics[device_fh].rx_bad_l4_csum);
+ &dev_statistics[vid].rx_bad_l4_csum);
- printf("\nStatistics for device %"PRIu32" ----------"
+ printf("\nStatistics for device %d ----------"
"\nTX total: %"PRIu64""
"\nTX dropped: %"PRIu64""
"\nTX successful: %"PRIu64""
@@ -1137,7 +1134,7 @@ print_stats(void)
"\nRX bad L4 csum: %"PRIu64""
"\nRX dropped: %"PRIu64""
"\nRX successful: %"PRIu64"",
- device_fh,
+ vid,
tx_total,
tx_dropped,
tx,
@@ -1190,8 +1187,6 @@ main(int argc, char *argv[])
/* Get the number of physical ports. */
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NB_PORTS and global array PORTS
@@ -1220,9 +1215,6 @@ main(int argc, char *argv[])
for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
vpool_array[queue_id].pool = mbuf_pool;
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
-
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
@@ -1251,7 +1243,7 @@ main(int argc, char *argv[])
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
ret = rte_thread_setname(tid, thread_name);
if (ret != 0)
- RTE_LOG(ERR, VHOST_CONFIG, "Cannot set print-stats name\n");
+ RTE_LOG(DEBUG, VHOST_CONFIG, "Cannot set print-stats name\n");
}
/* Launch all data cores. */
@@ -1262,7 +1254,7 @@ main(int argc, char *argv[])
rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
/* Register CUSE device to handle IOCTLs. */
- ret = rte_vhost_driver_register((char *)&dev_basename);
+ ret = rte_vhost_driver_register((char *)&dev_basename, 0);
if (ret != 0)
rte_exit(EXIT_FAILURE, "CUSE device setup failure.\n");
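Both vhost examples now receive a plain integer vid in their device callbacks and key all per-device state on it instead of dereferencing struct virtio_net; a minimal registration sketch, assuming the DPDK 16.07 virtio_net_device_ops layout:
#include <rte_log.h>
#include <rte_virtio_net.h>

/* Sketch: vid-based vhost callbacks; application state is looked up by vid. */
static int
sketch_new_device(int vid)
{
	RTE_LOG(INFO, USER1, "(%d) device ready\n", vid);
	return 0;
}

static void
sketch_destroy_device(int vid)
{
	RTE_LOG(INFO, USER1, "(%d) device removed\n", vid);
}

static const struct virtio_net_device_ops sketch_ops = {
	.new_device = sketch_new_device,
	.destroy_device = sketch_destroy_device,
};

/* registered with rte_vhost_driver_callback_register(&sketch_ops); */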
diff --git a/examples/tep_termination/main.h b/examples/tep_termination/main.h
index a34301ad..c0ea7667 100644
--- a/examples/tep_termination/main.h
+++ b/examples/tep_termination/main.h
@@ -36,14 +36,6 @@
#include <rte_ether.h>
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
@@ -79,8 +71,7 @@ struct device_statistics {
* Device linked list structure for data path.
*/
struct vhost_dev {
- /**< Pointer to device created by vhost lib. */
- struct virtio_net *dev;
+ int vid;
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
/**< Memory region information for gpa to hpa translation. */
@@ -124,6 +115,6 @@ struct virtio_net_data_ll {
};
uint32_t
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+virtio_dev_rx(int vid, struct rte_mbuf **pkts, uint32_t count);
#endif /* _MAIN_H_ */
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 2a48e142..37575c27 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -244,17 +244,16 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
{
int i, ret;
struct ether_hdr *pkt_hdr;
- struct virtio_net *dev = vdev->dev;
- uint64_t portid = dev->device_fh;
+ uint64_t portid = vdev->vid;
struct ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
if (unlikely(portid > VXLAN_N_PORTS)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: Not configuring device,"
+ "(%d) WARNING: Not configuring device, "
"as there are already %d ports for VXLAN.\n",
- dev->device_fh, VXLAN_N_PORTS);
+ vdev->vid, VXLAN_N_PORTS);
return -1;
}
@@ -262,9 +261,9 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: This device is using an existing"
+ "(%d) WARNING: This device is using an existing"
" MAC address and has not been registered.\n",
- dev->device_fh);
+ vdev->vid);
return -1;
}
@@ -425,8 +424,7 @@ vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
/* Check for decapsulation and pass packets directly to VIRTIO device */
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
- uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
{
uint32_t i = 0;
uint32_t count = 0;
@@ -436,11 +434,11 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
for (i = 0; i < rx_count; i++) {
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
!= 0);
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
!= 0);
}
@@ -452,6 +450,6 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
count++;
}
- ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+ ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
return ret;
}
diff --git a/examples/tep_termination/vxlan_setup.h b/examples/tep_termination/vxlan_setup.h
index 1846540f..8d264619 100644
--- a/examples/tep_termination/vxlan_setup.h
+++ b/examples/tep_termination/vxlan_setup.h
@@ -55,10 +55,10 @@ typedef void (*ol_tunnel_destroy_t)(struct vhost_dev *vdev);
typedef int (*ol_tx_handle_t)(uint8_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
-typedef int (*ol_rx_handle_t)(struct virtio_net *dev, struct rte_mbuf **pkts,
+typedef int (*ol_rx_handle_t)(int vid, struct rte_mbuf **pkts,
uint32_t count);
-typedef int (*ol_param_handle)(struct virtio_net *dev);
+typedef int (*ol_param_handle)(int vid);
struct ol_switch_ops {
ol_port_configure_t port_configure;
@@ -82,6 +82,6 @@ int
vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts, uint32_t count);
#endif /* VXLAN_SETUP_H_ */
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 28c17afd..3aff2cc8 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,26 +62,9 @@
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
-/*
- * Calculate the number of buffers needed per port
- */
-#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
- (num_switching_cores*MAX_PKT_BURST) + \
- (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
- ((num_switching_cores+1)*MBUF_CACHE_SIZE))
-
#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
-/*
- * No frame data buffer allocated from host are required for zero copy
- * implementation, guest will allocate the frame data buffer, and vhost
- * directly use it.
- */
-#define VIRTIO_DESCRIPTOR_LEN_ZCP RTE_MBUF_DEFAULT_DATAROOM
-#define MBUF_DATA_SIZE_ZCP RTE_MBUF_DEFAULT_BUF_SIZE
-#define MBUF_CACHE_SIZE_ZCP 0
-
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@@ -95,33 +78,10 @@
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2
-/* Config_core_flag status definitions. */
-#define REQUEST_DEV_REMOVAL 1
-#define ACK_DEV_REMOVAL 0
-
/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
-/*
- * Need refine these 2 macros for legacy and DPDK based front end:
- * Max vring avail descriptor/entries from guest - MAX_PKT_BURST
- * And then adjust power 2.
- */
-/*
- * For legacy front end, 128 descriptors,
- * half for virtio header, another half for mbuf.
- */
-#define RTE_TEST_RX_DESC_DEFAULT_ZCP 32 /* legacy: 32, DPDK virt FE: 128. */
-#define RTE_TEST_TX_DESC_DEFAULT_ZCP 64 /* legacy: 64, DPDK virt FE: 64. */
-
-/* Get first 4 bytes in mbuf headroom. */
-#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
- + sizeof(struct rte_mbuf)))
-
-/* true if x is a power of 2 */
-#define POWEROF2(x) ((((x)-1) & (x)) == 0)
-
#define INVALID_PORT_ID 0xFF
/* Max number of devices. Limited by vmdq. */
@@ -136,50 +96,22 @@
/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
-/* Used to compare MAC addresses. */
-#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
-
-/* Number of descriptors per cacheline. */
-#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
-
-#define MBUF_EXT_MEM(mb) (rte_mbuf_from_indirect(mb) != (mb))
-
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
/* Promiscuous mode */
static uint32_t promiscuous;
-/*Number of switching cores enabled*/
-static uint32_t num_switching_cores = 0;
-
/* number of devices/queues to support*/
static uint32_t num_queues = 0;
static uint32_t num_devices;
-/*
- * Enable zero copy, pkts buffer will directly dma to hw descriptor,
- * disabled on default.
- */
-static uint32_t zero_copy;
+static struct rte_mempool *mbuf_pool;
static int mergeable;
/* Do vlan strip on host, enabled on default */
static uint32_t vlan_strip = 1;
-/* number of descriptors to apply*/
-static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
-static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;
-
-/* max ring descriptor, ixgbe, i40e, e1000 all are 4096. */
-#define MAX_RING_DESC 4096
-
-struct vpool {
- struct rte_mempool *pool;
- struct rte_ring *ring;
- uint32_t buf_size;
-} vpool_array[MAX_QUEUES+MAX_QUEUES];
-
/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
VM2VM_DISABLED = 0,
@@ -189,14 +121,6 @@ typedef enum {
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
-/* The type of host physical address translated from guest physical address. */
-typedef enum {
- PHYS_ADDR_CONTINUOUS = 0,
- PHYS_ADDR_CROSS_SUBREG = 1,
- PHYS_ADDR_INVALID = 2,
- PHYS_ADDR_LAST
-} hpa_type;
-
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
@@ -208,6 +132,8 @@ static uint32_t enable_tx_csum;
/* Disable TSO offload */
static uint32_t enable_tso;
+static int client_mode;
+
/* Specify timeout (in useconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
@@ -259,7 +185,6 @@ static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
-static const uint16_t external_pkt_default_vlan_tag = 2000;
const uint16_t vlan_tags[] = {
1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
@@ -274,11 +199,9 @@ const uint16_t vlan_tags[] = {
/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
-/* heads for the main used and free linked lists for the data path. */
-static struct virtio_net_data_ll *ll_root_used = NULL;
-static struct virtio_net_data_ll *ll_root_free = NULL;
+static struct vhost_dev_tailq_list vhost_dev_list =
+ TAILQ_HEAD_INITIALIZER(vhost_dev_list);
-/* Array of data core structures containing information on individual core linked lists. */
static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
@@ -291,32 +214,9 @@ struct mbuf_table {
/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
-/* TX queue fori each virtio device for zero copy. */
-struct mbuf_table tx_queue_zcp[MAX_QUEUES];
-
-/* Vlan header struct used to insert vlan tags on TX. */
-struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-
-/* Header lengths. */
+#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
+ / US_PER_S * BURST_TX_DRAIN_US)
#define VLAN_HLEN 4
-#define VLAN_ETH_HLEN 18
-
-/* Per-device statistics struct */
-struct device_statistics {
- uint64_t tx_total;
- rte_atomic64_t rx_total_atomic;
- uint64_t rx_total;
- uint64_t tx;
- rte_atomic64_t rx_atomic;
- uint64_t rx;
-} __rte_cache_aligned;
-struct device_statistics dev_statistics[MAX_DEVICES];
/*
* Builds up the correct configuration for VMDQ VLAN pool map
@@ -394,29 +294,12 @@ port_init(uint8_t port)
/* Enable vlan offload */
txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
- /*
- * Zero copy defers queue RX/TX start to the time when guest
- * finishes its startup and packet buffers from that guest are
- * available.
- */
- if (zero_copy) {
- rxconf->rx_deferred_start = 1;
- rxconf->rx_drop_en = 0;
- txconf->tx_deferred_start = 1;
- }
-
/*configure the number of supported virtio devices based on VMDQ limits */
num_devices = dev_info.max_vmdq_pools;
- if (zero_copy) {
- rx_ring_size = num_rx_descriptor;
- tx_ring_size = num_tx_descriptor;
- tx_rings = dev_info.max_tx_queues;
- } else {
- rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
- tx_rings = (uint16_t)rte_lcore_count();
- }
+ rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ tx_rings = (uint16_t)rte_lcore_count();
retval = validate_num_devices(MAX_DEVICES);
if (retval < 0)
@@ -457,7 +340,7 @@ port_init(uint8_t port)
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
rte_eth_dev_socket_id(port),
rxconf,
- vpool_array[q].pool);
+ mbuf_pool);
if (retval < 0)
return retval;
}
@@ -576,14 +459,9 @@ us_vhost_usage(const char *prgname)
" --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
" --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
" --dev-basename: The basename to be used for the character device.\n"
- " --zero-copy [0|1]: disable(default)/enable rx/tx "
- "zero copy\n"
- " --rx-desc-num [0-N]: the number of descriptors on rx, "
- "used only when zero copy is enabled.\n"
- " --tx-desc-num [0-N]: the number of descriptors on tx, "
- "used only when zero copy is enabled.\n"
" --tx-csum [0|1] disable/enable TX checksum offload.\n"
- " --tso [0|1] disable/enable TCP segment offload.\n",
+ " --tso [0|1] disable/enable TCP segment offload.\n"
+ " --client register a vhost-user socket as client mode.\n",
prgname);
}
@@ -606,11 +484,9 @@ us_vhost_parse_args(int argc, char **argv)
{"vlan-strip", required_argument, NULL, 0},
{"stats", required_argument, NULL, 0},
{"dev-basename", required_argument, NULL, 0},
- {"zero-copy", required_argument, NULL, 0},
- {"rx-desc-num", required_argument, NULL, 0},
- {"tx-desc-num", required_argument, NULL, 0},
{"tx-csum", required_argument, NULL, 0},
{"tso", required_argument, NULL, 0},
+ {"client", no_argument, &client_mode, 1},
{NULL, 0, 0, 0},
};
@@ -765,50 +641,6 @@ us_vhost_parse_args(int argc, char **argv)
}
}
- /* Enable/disable rx/tx zero copy. */
- if (!strncmp(long_option[option_index].name,
- "zero-copy", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument"
- " for zero-copy [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else
- zero_copy = ret;
- }
-
- /* Specify the descriptor number on RX. */
- if (!strncmp(long_option[option_index].name,
- "rx-desc-num", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, MAX_RING_DESC);
- if ((ret == -1) || (!POWEROF2(ret))) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for rx-desc-num[0-N],"
- "power of 2 required.\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- num_rx_descriptor = ret;
- }
- }
-
- /* Specify the descriptor number on TX. */
- if (!strncmp(long_option[option_index].name,
- "tx-desc-num", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, MAX_RING_DESC);
- if ((ret == -1) || (!POWEROF2(ret))) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for tx-desc-num [0-N],"
- "power of 2 required.\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- num_tx_descriptor = ret;
- }
- }
-
break;
/* Invalid option - print options. */
@@ -829,21 +661,6 @@ us_vhost_parse_args(int argc, char **argv)
return -1;
}
- if ((zero_copy == 1) && (vm2vm_mode == VM2VM_SOFTWARE)) {
- RTE_LOG(INFO, VHOST_PORT,
- "Vhost zero copy doesn't support software vm2vm,"
- "please specify 'vm2vm 2' to use hardware vm2vm.\n");
- return -1;
- }
-
- if ((zero_copy == 1) && (vmdq_conf_default.rxmode.jumbo_frame == 1)) {
- RTE_LOG(INFO, VHOST_PORT,
- "Vhost zero copy doesn't support jumbo frame,"
- "please specify '--mergeable 0' to disable the "
- "mergeable feature.\n");
- return -1;
- }
-
return 0;
}
@@ -873,74 +690,18 @@ static unsigned check_ports_num(unsigned nb_ports)
return valid_num_ports;
}
-/*
- * Macro to print out packet contents. Wrapped in debug define so that the
- * data path is not effected when debug is disabled.
- */
-#ifdef DEBUG
-#define PRINT_PACKET(device, addr, size, header) do { \
- char *pkt_addr = (char*)(addr); \
- unsigned int index; \
- char packet[MAX_PRINT_BUFF]; \
- \
- if ((header)) \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
- else \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
- for (index = 0; index < (size); index++) { \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
- "%02hhx ", pkt_addr[index]); \
- } \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
- \
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
-} while(0)
-#else
-#define PRINT_PACKET(device, addr, size, header) do{} while(0)
-#endif
-
-/*
- * Function to convert guest physical addresses to vhost physical addresses.
- * This is used to convert virtio buffer addresses.
- */
-static inline uint64_t __attribute__((always_inline))
-gpa_to_hpa(struct vhost_dev *vdev, uint64_t guest_pa,
- uint32_t buf_len, hpa_type *addr_type)
+static inline struct vhost_dev *__attribute__((always_inline))
+find_vhost_dev(struct ether_addr *mac)
{
- struct virtio_memory_regions_hpa *region;
- uint32_t regionidx;
- uint64_t vhost_pa = 0;
-
- *addr_type = PHYS_ADDR_INVALID;
-
- for (regionidx = 0; regionidx < vdev->nregions_hpa; regionidx++) {
- region = &vdev->regions_hpa[regionidx];
- if ((guest_pa >= region->guest_phys_address) &&
- (guest_pa <= region->guest_phys_address_end)) {
- vhost_pa = region->host_phys_addr_offset + guest_pa;
- if (likely((guest_pa + buf_len - 1)
- <= region->guest_phys_address_end))
- *addr_type = PHYS_ADDR_CONTINUOUS;
- else
- *addr_type = PHYS_ADDR_CROSS_SUBREG;
- break;
- }
- }
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| HPA %p\n",
- vdev->dev->device_fh, (void *)(uintptr_t)guest_pa,
- (void *)(uintptr_t)vhost_pa);
+ struct vhost_dev *vdev;
- return vhost_pa;
-}
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->ready == DEVICE_RX &&
+ is_same_ether_addr(mac, &vdev->mac_address))
+ return vdev;
+ }
-/*
- * Compares a packet destination MAC address to a device MAC address.
- */
-static inline int __attribute__((always_inline))
-ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
-{
- return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
+ return NULL;
}
/*
@@ -951,32 +712,28 @@ static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
struct ether_hdr *pkt_hdr;
- struct virtio_net_data_ll *dev_ll;
- struct virtio_net *dev = vdev->dev;
int i, ret;
/* Learn MAC address of guest device from packet */
pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- dev_ll = ll_root_used;
-
- while (dev_ll != NULL) {
- if (ether_addr_cmp(&(pkt_hdr->s_addr), &dev_ll->vdev->mac_address)) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
- return -1;
- }
- dev_ll = dev_ll->next;
+ if (find_vhost_dev(&pkt_hdr->s_addr)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) device is using a registered MAC!\n",
+ vdev->vid);
+ return -1;
}
for (i = 0; i < ETHER_ADDR_LEN; i++)
vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
/* vlan_tag currently uses the device_id. */
- vdev->vlan_tag = vlan_tags[dev->device_fh];
+ vdev->vlan_tag = vlan_tags[vdev->vid];
/* Print out VMDQ registration info. */
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
- dev->device_fh,
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
+ vdev->vid,
vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
@@ -984,10 +741,11 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
/* Register the MAC address. */
ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
- (uint32_t)dev->device_fh + vmdq_pool_base);
+ (uint32_t)vdev->vid + vmdq_pool_base);
if (ret)
- RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
- dev->device_fh);
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) failed to add device MAC address to VMDQ\n",
+ vdev->vid);
/* Enable stripping of the vlan tag as we handle routing. */
if (vlan_strip)
@@ -1035,6 +793,21 @@ unlink_vmdq(struct vhost_dev *vdev)
}
}
+static inline void __attribute__((always_inline))
+virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+ struct rte_mbuf *m)
+{
+ uint16_t ret;
+
+ ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+ if (enable_stats) {
+ rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
+ rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
+ src_vdev->stats.tx_total++;
+ src_vdev->stats.tx += ret;
+ }
+}
+
/*
* Check if the packet destination MAC address is for a local device. If so then put
* the packet on that devices RX queue. If not then return.
@@ -1042,56 +815,33 @@ unlink_vmdq(struct vhost_dev *vdev)
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
- struct virtio_net_data_ll *dev_ll;
struct ether_hdr *pkt_hdr;
- uint64_t ret = 0;
- struct virtio_net *dev = vdev->dev;
- struct virtio_net *tdev; /* destination virito device */
+ struct vhost_dev *dst_vdev;
pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- /*get the used devices list*/
- dev_ll = ll_root_used;
+ dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+ if (!dst_vdev)
+ return -1;
- while (dev_ll != NULL) {
- if ((dev_ll->vdev->ready == DEVICE_RX) && ether_addr_cmp(&(pkt_hdr->d_addr),
- &dev_ll->vdev->mac_address)) {
+ if (vdev->vid == dst_vdev->vid) {
+ RTE_LOG(DEBUG, VHOST_DATA,
+			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
+ vdev->vid);
+ return 0;
+ }
- /* Drop the packet if the TX packet is destined for the TX device. */
- if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
- dev->device_fh);
- return 0;
- }
- tdev = dev_ll->vdev->dev;
-
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", tdev->device_fh);
-
- if (unlikely(dev_ll->vdev->remove)) {
- /*drop the packet if the device is marked for removal*/
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", tdev->device_fh);
- } else {
- /*send the packet to the local virtio device*/
- ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
- if (enable_stats) {
- rte_atomic64_add(
- &dev_statistics[tdev->device_fh].rx_total_atomic,
- 1);
- rte_atomic64_add(
- &dev_statistics[tdev->device_fh].rx_atomic,
- ret);
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx += ret;
- }
- }
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: MAC address is local\n", dst_vdev->vid);
- return 0;
- }
- dev_ll = dev_ll->next;
+ if (unlikely(dst_vdev->remove)) {
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) device is marked for removal\n", dst_vdev->vid);
+ return 0;
}
- return -1;
+ virtio_xmit(dst_vdev, vdev, m);
+ return 0;
}
/*
@@ -1099,49 +849,35 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
* and get its vlan tag, and offset if it is.
*/
static inline int __attribute__((always_inline))
-find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
+find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
uint32_t *offset, uint16_t *vlan_tag)
{
- struct virtio_net_data_ll *dev_ll = ll_root_used;
+ struct vhost_dev *dst_vdev;
struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- while (dev_ll != NULL) {
- if ((dev_ll->vdev->ready == DEVICE_RX)
- && ether_addr_cmp(&(pkt_hdr->d_addr),
- &dev_ll->vdev->mac_address)) {
- /*
- * Drop the packet if the TX packet is
- * destined for the TX device.
- */
- if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") TX: Source and destination"
- " MAC addresses are the same. Dropping "
- "packet.\n",
- dev_ll->vdev->dev->device_fh);
- return -1;
- }
-
- /*
- * HW vlan strip will reduce the packet length
- * by minus length of vlan tag, so need restore
- * the packet length by plus it.
- */
- *offset = VLAN_HLEN;
- *vlan_tag =
- (uint16_t)
- vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") TX: pkt to local VM device id:"
- "(%"PRIu64") vlan tag: %d.\n",
- dev->device_fh, dev_ll->vdev->dev->device_fh,
- (int)*vlan_tag);
+ dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+ if (!dst_vdev)
+ return 0;
- break;
- }
- dev_ll = dev_ll->next;
+ if (vdev->vid == dst_vdev->vid) {
+ RTE_LOG(DEBUG, VHOST_DATA,
+			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
+ vdev->vid);
+ return -1;
}
+
+ /*
+	 * HW vlan strip will reduce the packet length by the length of
+	 * the vlan tag, so we need to restore the packet length by
+	 * adding it back.
+ */
+ *offset = VLAN_HLEN;
+ *vlan_tag = vlan_tags[vdev->vid];
+
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
+ vdev->vid, dst_vdev->vid, *vlan_tag);
+
return 0;
}
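
Editorial note, not from the patch: the offset returned above (VLAN_HLEN, i.e. 4 bytes) is meant to be added back to the mbuf length by the caller before the frame is handed to the NIC, since the hardware stripped the tag on receive and a tag will be re-inserted on transmit. A hedged sketch of that adjustment; the helper name and exact placement are assumptions:

static inline void
restore_vlan_len(struct rte_mbuf *m, uint32_t offset)
{
	/* offset is VLAN_HLEN when a tag will be re-inserted on TX */
	m->data_len += offset;
	m->pkt_len  += offset;
}
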
@@ -1173,20 +909,49 @@ static void virtio_tx_offload(struct rte_mbuf *m)
tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
+static inline void
+free_pkts(struct rte_mbuf **pkts, uint16_t n)
+{
+ while (n--)
+ rte_pktmbuf_free(pkts[n]);
+}
+
+static inline void __attribute__((always_inline))
+do_drain_mbuf_table(struct mbuf_table *tx_q)
+{
+ uint16_t count;
+
+ count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
+ tx_q->m_table, tx_q->len);
+ if (unlikely(count < tx_q->len))
+ free_pkts(&tx_q->m_table[count], tx_q->len - count);
+
+ tx_q->len = 0;
+}
+
/*
- * This function routes the TX packet to the correct interface. This may be a local device
- * or the physical port.
+ * This function routes the TX packet to the correct interface. This
+ * may be a local device or the physical port.
*/
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
struct mbuf_table *tx_q;
- struct rte_mbuf **m_table;
- unsigned len, ret, offset = 0;
+ unsigned offset = 0;
const uint16_t lcore_id = rte_lcore_id();
- struct virtio_net *dev = vdev->dev;
struct ether_hdr *nh;
+
+ nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
+ struct vhost_dev *vdev2;
+
+ TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
+ virtio_xmit(vdev2, vdev, m);
+ }
+ goto queue2nic;
+ }
+
/*check if destination is local VM*/
if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
rte_pktmbuf_free(m);
@@ -1194,17 +959,20 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
}
if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
- if (unlikely(find_local_dest(dev, m, &offset, &vlan_tag) != 0)) {
+ if (unlikely(find_local_dest(vdev, m, &offset,
+ &vlan_tag) != 0)) {
rte_pktmbuf_free(m);
return;
}
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: MAC address is external\n", vdev->vid);
+
+queue2nic:
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
- len = tx_q->len;
nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
@@ -1242,1411 +1010,213 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
if (m->ol_flags & PKT_TX_TCP_SEG)
virtio_tx_offload(m);
- tx_q->m_table[len] = m;
- len++;
+ tx_q->m_table[tx_q->len++] = m;
if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
- }
-
- if (unlikely(len == MAX_PKT_BURST)) {
- m_table = (struct rte_mbuf **)tx_q->m_table;
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
- /* Free any buffers not handled by TX and update the port stats. */
- if (unlikely(ret < len)) {
- do {
- rte_pktmbuf_free(m_table[ret]);
- } while (++ret < len);
- }
-
- len = 0;
+ vdev->stats.tx_total++;
+ vdev->stats.tx++;
}
- tx_q->len = len;
- return;
+ if (unlikely(tx_q->len == MAX_PKT_BURST))
+ do_drain_mbuf_table(tx_q);
}
-/*
- * This function is called by each data core. It handles all RX/TX registered with the
- * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
- * with all devices in the main linked list.
- */
-static int
-switch_worker(__attribute__((unused)) void *arg)
-{
- struct rte_mempool *mbuf_pool = arg;
- struct virtio_net *dev = NULL;
- struct vhost_dev *vdev = NULL;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct virtio_net_data_ll *dev_ll;
- struct mbuf_table *tx_q;
- volatile struct lcore_ll_info *lcore_ll;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
- uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
- unsigned ret, i;
- const uint16_t lcore_id = rte_lcore_id();
- const uint16_t num_cores = (uint16_t)rte_lcore_count();
- uint16_t rx_count = 0;
- uint16_t tx_count;
- uint32_t retry = 0;
-
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
- lcore_ll = lcore_info[lcore_id].lcore_ll;
- prev_tsc = 0;
-
- tx_q = &lcore_tx_queue[lcore_id];
- for (i = 0; i < num_cores; i ++) {
- if (lcore_ids[i] == lcore_id) {
- tx_q->txq_id = i;
- break;
- }
- }
-
- while(1) {
- cur_tsc = rte_rdtsc();
- /*
- * TX burst queue drain
- */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
-
- if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
-
- /*Tx any packets in the queue*/
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
- (struct rte_mbuf **)tx_q->m_table,
- (uint16_t)tx_q->len);
- if (unlikely(ret < tx_q->len)) {
- do {
- rte_pktmbuf_free(tx_q->m_table[ret]);
- } while (++ret < tx_q->len);
- }
-
- tx_q->len = 0;
- }
- prev_tsc = cur_tsc;
- }
-
- rte_prefetch0(lcore_ll->ll_root_used);
- /*
- * Inform the configuration core that we have exited the linked list and that no devices are
- * in use if requested.
- */
- if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
- lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
-
- /*
- * Process devices
- */
- dev_ll = lcore_ll->ll_root_used;
-
- while (dev_ll != NULL) {
- /*get virtio device ID*/
- vdev = dev_ll->vdev;
- dev = vdev->dev;
-
- if (unlikely(vdev->remove)) {
- dev_ll = dev_ll->next;
- unlink_vmdq(vdev);
- vdev->ready = DEVICE_SAFE_REMOVE;
- continue;
- }
- if (likely(vdev->ready == DEVICE_RX)) {
- /*Handle guest RX*/
- rx_count = rte_eth_rx_burst(ports[0],
- vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
-
- if (rx_count) {
- /*
- * Retry is enabled and the queue is full then we wait and retry to avoid packet loss
- * Here MAX_PKT_BURST must be less than virtio queue size
- */
- if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev, VIRTIO_RXQ))) {
- for (retry = 0; retry < burst_rx_retry_num; retry++) {
- rte_delay_us(burst_rx_delay_time);
- if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
- break;
- }
- }
- ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count);
- if (enable_stats) {
- rte_atomic64_add(
- &dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
- rx_count);
- rte_atomic64_add(
- &dev_statistics[dev_ll->vdev->dev->device_fh].rx_atomic, ret_count);
- }
- while (likely(rx_count)) {
- rx_count--;
- rte_pktmbuf_free(pkts_burst[rx_count]);
- }
+static inline void __attribute__((always_inline))
+drain_mbuf_table(struct mbuf_table *tx_q)
+{
+ static uint64_t prev_tsc;
+ uint64_t cur_tsc;
- }
- }
+ if (tx_q->len == 0)
+ return;
- if (likely(!vdev->remove)) {
- /* Handle guest TX*/
- tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
- /* If this is the first received packet we need to learn the MAC and setup VMDQ */
- if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
- if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
- while (tx_count)
- rte_pktmbuf_free(pkts_burst[--tx_count]);
- }
- }
- for (i = 0; i < tx_count; ++i) {
- virtio_tx_route(vdev, pkts_burst[i],
- vlan_tags[(uint16_t)dev->device_fh]);
- }
- }
+ cur_tsc = rte_rdtsc();
+ if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
+ prev_tsc = cur_tsc;
- /*move to the next device in the list*/
- dev_ll = dev_ll->next;
- }
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "TX queue drained after timeout with burst size %u\n",
+ tx_q->len);
+ do_drain_mbuf_table(tx_q);
}
-
- return 0;
}
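
MBUF_TABLE_DRAIN_TSC is defined elsewhere in this file and is not visible in this hunk. An editorial sketch of a definition consistent with the drain_tsc computation that the old switch_worker performed inline; the BURST_TX_DRAIN_US value of 100 us is an assumption:

#define BURST_TX_DRAIN_US	100	/* assumed: drain the TX table roughly every 100 us */
#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
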
-/*
- * This function gets available ring number for zero copy rx.
- * Only one thread will call this funciton for a paticular virtio device,
- * so, it is designed as non-thread-safe function.
- */
-static inline uint32_t __attribute__((always_inline))
-get_available_ring_num_zcp(struct virtio_net *dev)
-{
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
- uint16_t avail_idx;
-
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- return (uint32_t)(avail_idx - vq->last_used_idx_res);
-}
-
-/*
- * This function gets available ring index for zero copy rx,
- * it will retry 'burst_rx_retry_num' times till it get enough ring index.
- * Only one thread will call this funciton for a paticular virtio device,
- * so, it is designed as non-thread-safe function.
- */
-static inline uint32_t __attribute__((always_inline))
-get_available_ring_index_zcp(struct virtio_net *dev,
- uint16_t *res_base_idx, uint32_t count)
+static inline void __attribute__((always_inline))
+drain_eth_rx(struct vhost_dev *vdev)
{
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
- uint16_t avail_idx;
- uint32_t retry = 0;
- uint16_t free_entries;
-
- *res_base_idx = vq->last_used_idx_res;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- free_entries = (avail_idx - *res_base_idx);
+ uint16_t rx_count, enqueue_count;
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
- "avail idx: %d, "
- "res base idx:%d, free entries:%d\n",
- dev->device_fh, avail_idx, *res_base_idx,
- free_entries);
+ rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
+ pkts, MAX_PKT_BURST);
+ if (!rx_count)
+ return;
/*
- * If retry is enabled and the queue is full then we wait
- * and retry to avoid packet loss.
+	 * When "enable_retry" is set, we wait and retry here when there
+	 * are not enough free slots in the queue to hold @rx_count
+	 * packets, to reduce packet loss.
*/
- if (enable_retry && unlikely(count > free_entries)) {
+ if (enable_retry &&
+ unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
+ VIRTIO_RXQ))) {
+ uint32_t retry;
+
for (retry = 0; retry < burst_rx_retry_num; retry++) {
rte_delay_us(burst_rx_delay_time);
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- free_entries = (avail_idx - *res_base_idx);
- if (count <= free_entries)
+ if (rx_count <= rte_vhost_avail_entries(vdev->vid,
+ VIRTIO_RXQ))
break;
}
}
- /*check that we have enough buffers*/
- if (unlikely(count > free_entries))
- count = free_entries;
-
- if (unlikely(count == 0)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") Fail in get_available_ring_index_zcp: "
- "avail idx: %d, res base idx:%d, free entries:%d\n",
- dev->device_fh, avail_idx,
- *res_base_idx, free_entries);
- return 0;
+ enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+ pkts, rx_count);
+ if (enable_stats) {
+ rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
+ rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
}
- vq->last_used_idx_res = *res_base_idx + count;
-
- return count;
+ free_pkts(pkts, rx_count);
}
-/*
- * This function put descriptor back to used list.
- */
static inline void __attribute__((always_inline))
-put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx)
+drain_virtio_tx(struct vhost_dev *vdev)
{
- uint16_t res_cur_idx = vq->last_used_idx;
- vq->used->ring[res_cur_idx & (vq->size - 1)].id = (uint32_t)desc_idx;
- vq->used->ring[res_cur_idx & (vq->size - 1)].len = 0;
- rte_compiler_barrier();
- *(volatile uint16_t *)&vq->used->idx += 1;
- vq->last_used_idx += 1;
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write(vq->callfd, (eventfd_t)1);
-}
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
+ uint16_t count;
+ uint16_t i;
-/*
- * This function get available descriptor from vitio vring and un-attached mbuf
- * from vpool->ring, and then attach them together. It needs adjust the offset
- * for buff_addr and phys_addr accroding to PMD implementation, otherwise the
- * frame data may be put to wrong location in mbuf.
- */
-static inline void __attribute__((always_inline))
-attach_rxmbuf_zcp(struct virtio_net *dev)
-{
- uint16_t res_base_idx, desc_idx;
- uint64_t buff_addr, phys_addr;
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- void *obj = NULL;
- struct rte_mbuf *mbuf;
- struct vpool *vpool;
- hpa_type addr_type;
- struct vhost_dev *vdev = (struct vhost_dev *)dev->priv;
-
- vpool = &vpool_array[vdev->vmdq_rx_q];
- vq = dev->virtqueue[VIRTIO_RXQ];
-
- do {
- if (unlikely(get_available_ring_index_zcp(vdev->dev, &res_base_idx,
- 1) != 1))
- return;
- desc_idx = vq->avail->ring[(res_base_idx) & (vq->size - 1)];
-
- desc = &vq->desc[desc_idx];
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc = &vq->desc[desc->next];
- buff_addr = gpa_to_vva(dev, desc->addr);
- phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len,
- &addr_type);
- } else {
- buff_addr = gpa_to_vva(dev,
- desc->addr + vq->vhost_hlen);
- phys_addr = gpa_to_hpa(vdev,
- desc->addr + vq->vhost_hlen,
- desc->len, &addr_type);
- }
+ count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
+ pkts, MAX_PKT_BURST);
- if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
- RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Invalid frame buffer"
- " address found when attaching RX frame buffer"
- " address!\n", dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- continue;
- }
-
- /*
- * Check if the frame buffer address from guest crosses
- * sub-region or not.
- */
- if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Frame buffer address cross "
- "sub-regioin found when attaching RX frame "
- "buffer address!\n",
- dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- continue;
- }
- } while (unlikely(phys_addr == 0));
-
- rte_ring_sc_dequeue(vpool->ring, &obj);
- mbuf = obj;
- if (unlikely(mbuf == NULL)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in attach_rxmbuf_zcp: "
- "ring_sc_dequeue fail.\n",
- dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- return;
- }
-
- if (unlikely(vpool->buf_size > desc->len)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in attach_rxmbuf_zcp: frame buffer "
- "length(%d) of descriptor idx: %d less than room "
- "size required: %d\n",
- dev->device_fh, desc->len, desc_idx, vpool->buf_size);
- put_desc_to_used_list_zcp(vq, desc_idx);
- rte_ring_sp_enqueue(vpool->ring, obj);
- return;
+ /* setup VMDq for the first packet */
+ if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
+ if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
+ free_pkts(pkts, count);
}
- mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
- mbuf->data_len = desc->len;
- MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, "
- "descriptor idx:%d\n",
- dev->device_fh, res_base_idx, desc_idx);
-
- __rte_mbuf_raw_free(mbuf);
-
- return;
+ for (i = 0; i < count; ++i)
+ virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
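
Editorial sketch, not from the patch: drain_virtio_tx() now reads a global mbuf_pool rather than receiving it as the worker argument, as the old switch_worker did. One plausible way such a pool is created in main(); the pool name and the sizing constants NUM_MBUFS and MBUF_CACHE_SIZE are assumptions:

/* file scope */
static struct rte_mempool *mbuf_pool;

/* in main(), before launching the workers */
mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS, MBUF_CACHE_SIZE, 0,
		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (mbuf_pool == NULL)
	rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
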
/*
- * Detach an attched packet mbuf -
- * - restore original mbuf address and length values.
- * - reset pktmbuf data and data_len to their default values.
- * All other fields of the given packet mbuf will be left intact.
+ * Main function of vhost-switch. It basically does:
*
- * @param m
- * The attached packet mbuf.
- */
-static inline void pktmbuf_detach_zcp(struct rte_mbuf *m)
-{
- const struct rte_mempool *mp = m->pool;
- void *buf = rte_mbuf_to_baddr(m);
- uint32_t buf_ofs;
- uint32_t buf_len = mp->elt_size - sizeof(*m);
- m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m);
-
- m->buf_addr = buf;
- m->buf_len = (uint16_t)buf_len;
-
- buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
- RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->data_off = buf_ofs;
-
- m->data_len = 0;
-}
-
-/*
- * This function is called after packets have been transimited. It fetchs mbuf
- * from vpool->pool, detached it and put into vpool->ring. It also update the
- * used index and kick the guest if necessary.
- */
-static inline uint32_t __attribute__((always_inline))
-txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
-{
- struct rte_mbuf *mbuf;
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
- uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
- uint32_t index = 0;
- uint32_t mbuf_count = rte_mempool_count(vpool->pool);
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before "
- "clean is: %d\n",
- dev->device_fh, mbuf_count);
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before "
- "clean is : %d\n",
- dev->device_fh, rte_ring_count(vpool->ring));
-
- for (index = 0; index < mbuf_count; index++) {
- mbuf = __rte_mbuf_raw_alloc(vpool->pool);
- if (likely(MBUF_EXT_MEM(mbuf)))
- pktmbuf_detach_zcp(mbuf);
- rte_ring_sp_enqueue(vpool->ring, mbuf);
-
- /* Update used index buffer information. */
- vq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf);
- vq->used->ring[used_idx].len = 0;
-
- used_idx = (used_idx + 1) & (vq->size - 1);
- }
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
- "clean is: %d\n",
- dev->device_fh, rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
- "clean is : %d\n",
- dev->device_fh, rte_ring_count(vpool->ring));
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: before updated "
- "vq->last_used_idx:%d\n",
- dev->device_fh, vq->last_used_idx);
-
- vq->last_used_idx += mbuf_count;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: after updated "
- "vq->last_used_idx:%d\n",
- dev->device_fh, vq->last_used_idx);
-
- rte_compiler_barrier();
-
- *(volatile uint16_t *)&vq->used->idx += mbuf_count;
-
- /* Kick guest if required. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write(vq->callfd, (eventfd_t)1);
-
- return 0;
-}
-
-/*
- * This function is called when a virtio device is destroy.
- * It fetchs mbuf from vpool->pool, and detached it, and put into vpool->ring.
- */
-static void mbuf_destroy_zcp(struct vpool *vpool)
-{
- struct rte_mbuf *mbuf = NULL;
- uint32_t index, mbuf_count = rte_mempool_count(vpool->pool);
-
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in mempool before "
- "mbuf_destroy_zcp is: %d\n",
- mbuf_count);
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in ring before "
- "mbuf_destroy_zcp is : %d\n",
- rte_ring_count(vpool->ring));
-
- for (index = 0; index < mbuf_count; index++) {
- mbuf = __rte_mbuf_raw_alloc(vpool->pool);
- if (likely(mbuf != NULL)) {
- if (likely(MBUF_EXT_MEM(mbuf)))
- pktmbuf_detach_zcp(mbuf);
- rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
- }
- }
-
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in mempool after "
- "mbuf_destroy_zcp is: %d\n",
- rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in ring after "
- "mbuf_destroy_zcp is : %d\n",
- rte_ring_count(vpool->ring));
-}
-
-/*
- * This function update the use flag and counter.
- */
-static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
- uint32_t count)
-{
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- struct rte_mbuf *buff;
- /* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr
- = {{0, 0, 0, 0, 0, 0}, 0};
- uint64_t buff_hdr_addr = 0;
- uint32_t head[MAX_PKT_BURST], packet_len = 0;
- uint32_t head_idx, packet_success = 0;
- uint16_t res_cur_idx;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
-
- if (count == 0)
- return 0;
-
- vq = dev->virtqueue[VIRTIO_RXQ];
- count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
-
- res_cur_idx = vq->last_used_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
- dev->device_fh, res_cur_idx, res_cur_idx + count);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]);
-
- /*Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
-
- while (packet_success != count) {
- /* Get descriptor from available ring */
- desc = &vq->desc[head[packet_success]];
-
- buff = pkts[packet_success];
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in dev_rx_zcp: update the used idx for "
- "pkt[%d] descriptor idx: %d\n",
- dev->device_fh, packet_success,
- MBUF_HEADROOM_UINT32(buff));
-
- PRINT_PACKET(dev,
- (uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr)
- + RTE_PKTMBUF_HEADROOM),
- rte_pktmbuf_data_len(buff), 0);
-
- /* Buffer address translation for virtio header. */
- buff_hdr_addr = gpa_to_vva(dev, desc->addr);
- packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
-
- /*
- * If the descriptors are chained the header and data are
- * placed in separate buffers.
- */
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc->len = vq->vhost_hlen;
- desc = &vq->desc[desc->next];
- desc->len = rte_pktmbuf_data_len(buff);
- } else {
- desc->len = packet_len;
- }
-
- /* Update used ring with desc information */
- vq->used->ring[res_cur_idx & (vq->size - 1)].id
- = head[packet_success];
- vq->used->ring[res_cur_idx & (vq->size - 1)].len
- = packet_len;
- res_cur_idx++;
- packet_success++;
-
- /* A header is required per buffer. */
- rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
- (const void *)&virtio_hdr, vq->vhost_hlen);
-
- PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
-
- if (likely(packet_success < count)) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
- }
- }
-
- rte_compiler_barrier();
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in dev_rx_zcp: before update used idx: "
- "vq.last_used_idx: %d, vq->used->idx: %d\n",
- dev->device_fh, vq->last_used_idx, vq->used->idx);
-
- *(volatile uint16_t *)&vq->used->idx += count;
- vq->last_used_idx += count;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in dev_rx_zcp: after update used idx: "
- "vq.last_used_idx: %d, vq->used->idx: %d\n",
- dev->device_fh, vq->last_used_idx, vq->used->idx);
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write(vq->callfd, (eventfd_t)1);
-
- return count;
-}
-
-/*
- * This function routes the TX packet to the correct interface.
- * This may be a local device or the physical port.
+ * for each vhost device {
+ * - drain_eth_rx()
+ *
+ *     Which drains the host eth Rx queue linked to the vhost device,
+ *     and delivers all of them to the guest virtio Rx ring associated
+ *     with this vhost device.
+ *
+ * - drain_virtio_tx()
+ *
+ *     Which drains the guest virtio Tx queue and delivers all of them
+ *     to the target, which could be another vhost device or the
+ *     physical eth dev. The routing is done by the function
+ *     "virtio_tx_route".
+ * }
*/
-static inline void __attribute__((always_inline))
-virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
- uint32_t desc_idx, uint8_t need_copy)
+static int
+switch_worker(void *arg __rte_unused)
{
+ unsigned i;
+ unsigned lcore_id = rte_lcore_id();
+ struct vhost_dev *vdev;
struct mbuf_table *tx_q;
- struct rte_mbuf **m_table;
- void *obj = NULL;
- struct rte_mbuf *mbuf;
- unsigned len, ret, offset = 0;
- struct vpool *vpool;
- uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
- uint16_t vmdq_rx_q = ((struct vhost_dev *)dev->priv)->vmdq_rx_q;
-
- /*Add packet to the port tx queue*/
- tx_q = &tx_queue_zcp[vmdq_rx_q];
- len = tx_q->len;
-
- /* Allocate an mbuf and populate the structure. */
- vpool = &vpool_array[MAX_QUEUES + vmdq_rx_q];
- rte_ring_sc_dequeue(vpool->ring, &obj);
- mbuf = obj;
- if (unlikely(mbuf == NULL)) {
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Failed to allocate memory for mbuf.\n",
- dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- return;
- }
-
- if (vm2vm_mode == VM2VM_HARDWARE) {
- /* Avoid using a vlan tag from any vm for external pkt, such as
- * vlan_tags[dev->device_fh], oterwise, it conflicts when pool
- * selection, MAC address determines it as an external pkt
- * which should go to network, while vlan tag determine it as
- * a vm2vm pkt should forward to another vm. Hardware confuse
- * such a ambiguous situation, so pkt will lost.
- */
- vlan_tag = external_pkt_default_vlan_tag;
- if (find_local_dest(dev, m, &offset, &vlan_tag) != 0) {
- MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
- __rte_mbuf_raw_free(mbuf);
- return;
- }
- }
-
- mbuf->nb_segs = m->nb_segs;
- mbuf->next = m->next;
- mbuf->data_len = m->data_len + offset;
- mbuf->pkt_len = mbuf->data_len;
- if (unlikely(need_copy)) {
- /* Copy the packet contents to the mbuf. */
- rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
- rte_pktmbuf_mtod(m, void *),
- m->data_len);
- } else {
- mbuf->data_off = m->data_off;
- mbuf->buf_physaddr = m->buf_physaddr;
- mbuf->buf_addr = m->buf_addr;
- }
- mbuf->ol_flags |= PKT_TX_VLAN_PKT;
- mbuf->vlan_tci = vlan_tag;
- mbuf->l2_len = sizeof(struct ether_hdr);
- mbuf->l3_len = sizeof(struct ipv4_hdr);
- MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
-
- tx_q->m_table[len] = mbuf;
- len++;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
- dev->device_fh,
- mbuf->nb_segs,
- (mbuf->next == NULL) ? "null" : "non-null");
-
- if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
- }
- if (unlikely(len == MAX_PKT_BURST)) {
- m_table = (struct rte_mbuf **)tx_q->m_table;
- ret = rte_eth_tx_burst(ports[0],
- (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
+	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
- /*
- * Free any buffers not handled by TX and update
- * the port stats.
- */
- if (unlikely(ret < len)) {
- do {
- rte_pktmbuf_free(m_table[ret]);
- } while (++ret < len);
+ tx_q = &lcore_tx_queue[lcore_id];
+ for (i = 0; i < rte_lcore_count(); i++) {
+ if (lcore_ids[i] == lcore_id) {
+ tx_q->txq_id = i;
+ break;
}
-
- len = 0;
- txmbuf_clean_zcp(dev, vpool);
}
- tx_q->len = len;
-
- return;
-}
-
-/*
- * This function TX all available packets in virtio TX queue for one
- * virtio-net device. If it is first packet, it learns MAC address and
- * setup VMDQ.
- */
-static inline void __attribute__((always_inline))
-virtio_dev_tx_zcp(struct virtio_net *dev)
-{
- struct rte_mbuf m;
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- uint64_t buff_addr = 0, phys_addr;
- uint32_t head[MAX_PKT_BURST];
- uint32_t i;
- uint16_t free_entries, packet_success = 0;
- uint16_t avail_idx;
- uint8_t need_copy = 0;
- hpa_type addr_type;
- struct vhost_dev *vdev = (struct vhost_dev *)dev->priv;
-
- vq = dev->virtqueue[VIRTIO_TXQ];
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- /* If there are no available buffers then return. */
- if (vq->last_used_idx_res == avail_idx)
- return;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
-
- /* Prefetch available ring to retrieve head indexes. */
- rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]);
-
- /* Get the number of free entries in the ring */
- free_entries = (avail_idx - vq->last_used_idx_res);
-
- /* Limit to MAX_PKT_BURST. */
- free_entries
- = (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
- dev->device_fh, free_entries);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (i = 0; i < free_entries; i++)
- head[i]
- = vq->avail->ring[(vq->last_used_idx_res + i)
- & (vq->size - 1)];
-
- vq->last_used_idx_res += free_entries;
-
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
- rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);
-
- while (packet_success < free_entries) {
- desc = &vq->desc[head[packet_success]];
-
- /* Discard first buffer as it is the virtio header */
- desc = &vq->desc[desc->next];
-
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Need check extra VLAN_HLEN size for inserting VLAN tag */
- phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len + VLAN_HLEN,
- &addr_type);
-
- if (likely(packet_success < (free_entries - 1)))
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success + 1]]);
-
- if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Invalid frame buffer address found"
- "when TX packets!\n",
- dev->device_fh);
- packet_success++;
- continue;
- }
-
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)buff_addr);
-
- /*
- * Setup dummy mbuf. This is copied to a real mbuf if
- * transmitted out the physical port.
- */
- m.data_len = desc->len;
- m.nb_segs = 1;
- m.next = NULL;
- m.data_off = 0;
- m.buf_addr = (void *)(uintptr_t)buff_addr;
- m.buf_physaddr = phys_addr;
-
- /*
- * Check if the frame buffer address from guest crosses
- * sub-region or not.
- */
- if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Frame buffer address cross "
- "sub-regioin found when attaching TX frame "
- "buffer address!\n",
- dev->device_fh);
- need_copy = 1;
- } else
- need_copy = 0;
-
- PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);
+ while(1) {
+ drain_mbuf_table(tx_q);
/*
- * If this is the first received packet we need to learn
- * the MAC and setup VMDQ
+		 * Inform the configuration core, if requested, that we have
+		 * finished walking the device list and that no devices are in use.
*/
- if (unlikely(vdev->ready == DEVICE_MAC_LEARNING)) {
- if (vdev->remove || (link_vmdq(vdev, &m) == -1)) {
- /*
- * Discard frame if device is scheduled for
- * removal or a duplicate MAC address is found.
- */
- packet_success += free_entries;
- vq->last_used_idx += packet_success;
- break;
- }
- }
-
- virtio_tx_route_zcp(dev, &m, head[packet_success], need_copy);
- packet_success++;
- }
-}
-
-/*
- * This function is called by each data core. It handles all RX/TX registered
- * with the core. For TX the specific lcore linked list is used. For RX, MAC
- * addresses are compared with all devices in the main linked list.
- */
-static int
-switch_worker_zcp(__attribute__((unused)) void *arg)
-{
- struct virtio_net *dev = NULL;
- struct vhost_dev *vdev = NULL;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct virtio_net_data_ll *dev_ll;
- struct mbuf_table *tx_q;
- volatile struct lcore_ll_info *lcore_ll;
- const uint64_t drain_tsc
- = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
- * BURST_TX_DRAIN_US;
- uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
- unsigned ret;
- const uint16_t lcore_id = rte_lcore_id();
- uint16_t count_in_ring, rx_count = 0;
-
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
-
- lcore_ll = lcore_info[lcore_id].lcore_ll;
- prev_tsc = 0;
-
- while (1) {
- cur_tsc = rte_rdtsc();
-
- /* TX burst queue drain */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
- /*
- * Get mbuf from vpool.pool and detach mbuf and
- * put back into vpool.ring.
- */
- dev_ll = lcore_ll->ll_root_used;
- while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) {
- /* Get virtio device ID */
- vdev = dev_ll->vdev;
- dev = vdev->dev;
-
- if (likely(!vdev->remove)) {
- tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
- if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA,
- "TX queue drained after timeout"
- " with burst size %u\n",
- tx_q->len);
-
- /*
- * Tx any packets in the queue
- */
- ret = rte_eth_tx_burst(
- ports[0],
- (uint16_t)tx_q->txq_id,
- (struct rte_mbuf **)
- tx_q->m_table,
- (uint16_t)tx_q->len);
- if (unlikely(ret < tx_q->len)) {
- do {
- rte_pktmbuf_free(
- tx_q->m_table[ret]);
- } while (++ret < tx_q->len);
- }
- tx_q->len = 0;
-
- txmbuf_clean_zcp(dev,
- &vpool_array[MAX_QUEUES+vdev->vmdq_rx_q]);
- }
- }
- dev_ll = dev_ll->next;
- }
- prev_tsc = cur_tsc;
- }
-
- rte_prefetch0(lcore_ll->ll_root_used);
+ if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
+ lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
/*
- * Inform the configuration core that we have exited the linked
- * list and that no devices are in use if requested.
+ * Process vhost devices
*/
- if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
- lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
-
- /* Process devices */
- dev_ll = lcore_ll->ll_root_used;
-
- while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) {
- vdev = dev_ll->vdev;
- dev = vdev->dev;
+ TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
+ lcore_vdev_entry) {
if (unlikely(vdev->remove)) {
- dev_ll = dev_ll->next;
unlink_vmdq(vdev);
vdev->ready = DEVICE_SAFE_REMOVE;
continue;
}
- if (likely(vdev->ready == DEVICE_RX)) {
- uint32_t index = vdev->vmdq_rx_q;
- uint16_t i;
- count_in_ring
- = rte_ring_count(vpool_array[index].ring);
- uint16_t free_entries
- = (uint16_t)get_available_ring_num_zcp(dev);
-
- /*
- * Attach all mbufs in vpool.ring and put back
- * into vpool.pool.
- */
- for (i = 0;
- i < RTE_MIN(free_entries,
- RTE_MIN(count_in_ring, MAX_PKT_BURST));
- i++)
- attach_rxmbuf_zcp(dev);
-
- /* Handle guest RX */
- rx_count = rte_eth_rx_burst(ports[0],
- vdev->vmdq_rx_q, pkts_burst,
- MAX_PKT_BURST);
-
- if (rx_count) {
- ret_count = virtio_dev_rx_zcp(dev,
- pkts_burst, rx_count);
- if (enable_stats) {
- dev_statistics[dev->device_fh].rx_total
- += rx_count;
- dev_statistics[dev->device_fh].rx
- += ret_count;
- }
- while (likely(rx_count)) {
- rx_count--;
- pktmbuf_detach_zcp(
- pkts_burst[rx_count]);
- rte_ring_sp_enqueue(
- vpool_array[index].ring,
- (void *)pkts_burst[rx_count]);
- }
- }
- }
+ if (likely(vdev->ready == DEVICE_RX))
+ drain_eth_rx(vdev);
if (likely(!vdev->remove))
- /* Handle guest TX */
- virtio_dev_tx_zcp(dev);
-
- /* Move to the next device in the list */
- dev_ll = dev_ll->next;
+ drain_virtio_tx(vdev);
}
}
return 0;
}
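
Editorial sketch of how switch_worker() is typically dispatched; the launch code itself is outside this hunk, and the exact call in main() is assumed to follow the usual DPDK pattern:

	unsigned lcore_id;

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);
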
-
-/*
- * Add an entry to a used linked list. A free entry must first be found
- * in the free linked list using get_data_ll_free_entry();
- */
-static void
-add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
- struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- /* Set next as NULL and use a compiler barrier to avoid reordering. */
- ll_dev->next = NULL;
- rte_compiler_barrier();
-
- /* If ll == NULL then this is the first device. */
- if (ll) {
- /* Increment to the tail of the linked list. */
- while ((ll->next != NULL) )
- ll = ll->next;
-
- ll->next = ll_dev;
- } else {
- *ll_root_addr = ll_dev;
- }
-}
-
-/*
- * Remove an entry from a used linked list. The entry must then be added to
- * the free linked list using put_data_ll_free_entry().
- */
-static void
-rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
- struct virtio_net_data_ll *ll_dev,
- struct virtio_net_data_ll *ll_dev_last)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- if (unlikely((ll == NULL) || (ll_dev == NULL)))
- return;
-
- if (ll_dev == ll)
- *ll_root_addr = ll_dev->next;
- else
- if (likely(ll_dev_last != NULL))
- ll_dev_last->next = ll_dev->next;
- else
- RTE_LOG(ERR, VHOST_CONFIG, "Remove entry form ll failed.\n");
-}
-
-/*
- * Find and return an entry from the free linked list.
- */
-static struct virtio_net_data_ll *
-get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
- struct virtio_net_data_ll *ll_dev;
-
- if (ll_free == NULL)
- return NULL;
-
- ll_dev = ll_free;
- *ll_root_addr = ll_free->next;
-
- return ll_dev;
-}
-
-/*
- * Place an entry back on to the free linked list.
- */
-static void
-put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
- struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
-
- if (ll_dev == NULL)
- return;
-
- ll_dev->next = ll_free;
- *ll_root_addr = ll_dev;
-}
-
/*
- * Creates a linked list of a given size.
- */
-static struct virtio_net_data_ll *
-alloc_data_ll(uint32_t size)
-{
- struct virtio_net_data_ll *ll_new;
- uint32_t i;
-
- /* Malloc and then chain the linked list. */
- ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
- if (ll_new == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
- return NULL;
- }
-
- for (i = 0; i < size - 1; i++) {
- ll_new[i].vdev = NULL;
- ll_new[i].next = &ll_new[i+1];
- }
- ll_new[i].next = NULL;
-
- return ll_new;
-}
-
-/*
- * Create the main linked list along with each individual cores linked list. A used and a free list
- * are created to manage entries.
- */
-static int
-init_data_ll (void)
-{
- int lcore;
-
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
- if (lcore_info[lcore].lcore_ll == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
- return -1;
- }
-
- lcore_info[lcore].lcore_ll->device_num = 0;
- lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
- lcore_info[lcore].lcore_ll->ll_root_used = NULL;
- if (num_devices % num_switching_cores)
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
- else
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
- }
-
- /* Allocate devices up to a maximum of MAX_DEVICES. */
- ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
-
- return 0;
-}
-
-/*
- * Remove a device from the specific data core linked list and from the main linked list. Synchonization
- * occurs through the use of the lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
+ * Remove a device from the specific data core linked list and from the
+ * main linked list. Synchronization occurs through the use of the
+ * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
* of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
*/
static void
-destroy_device (volatile struct virtio_net *dev)
+destroy_device(int vid)
{
- struct virtio_net_data_ll *ll_lcore_dev_cur;
- struct virtio_net_data_ll *ll_main_dev_cur;
- struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
- struct virtio_net_data_ll *ll_main_dev_last = NULL;
- struct vhost_dev *vdev;
+ struct vhost_dev *vdev = NULL;
int lcore;
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
- vdev = (struct vhost_dev *)dev->priv;
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return;
/*set the remove flag. */
vdev->remove = 1;
while(vdev->ready != DEVICE_SAFE_REMOVE) {
rte_pause();
}
- /* Search for entry to be removed from lcore ll */
- ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
- while (ll_lcore_dev_cur != NULL) {
- if (ll_lcore_dev_cur->vdev == vdev) {
- break;
- } else {
- ll_lcore_dev_last = ll_lcore_dev_cur;
- ll_lcore_dev_cur = ll_lcore_dev_cur->next;
- }
- }
-
- if (ll_lcore_dev_cur == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find the dev to be destroy.\n",
- dev->device_fh);
- return;
- }
+ TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
+ lcore_vdev_entry);
+ TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
- /* Search for entry to be removed from main ll */
- ll_main_dev_cur = ll_root_used;
- ll_main_dev_last = NULL;
- while (ll_main_dev_cur != NULL) {
- if (ll_main_dev_cur->vdev == vdev) {
- break;
- } else {
- ll_main_dev_last = ll_main_dev_cur;
- ll_main_dev_cur = ll_main_dev_cur->next;
- }
- }
-
- /* Remove entries from the lcore and main ll. */
- rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
- rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
/* Set the dev_removal_flag on each lcore. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
- }
+ RTE_LCORE_FOREACH_SLAVE(lcore)
+ lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
/*
- * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
- * they can no longer access the device removed from the linked lists and that the devices
- * are no longer in use.
+ * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
+ * we can be sure that they can no longer access the device removed
+ * from the linked lists and that the devices are no longer in use.
*/
RTE_LCORE_FOREACH_SLAVE(lcore) {
- while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
+ while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
rte_pause();
- }
}
- /* Add the entries back to the lcore and main free ll.*/
- put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
- put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
+ lcore_info[vdev->coreid].device_num--;
- /* Decrement number of device on the lcore. */
- lcore_info[vdev->coreid].lcore_ll->device_num--;
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) device has been removed from data core\n",
+ vdev->vid);
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
-
- if (zero_copy) {
- struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
-
- /* Stop the RX queue. */
- if (rte_eth_dev_rx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In destroy_device: Failed to stop "
- "rx queue:%d\n",
- dev->device_fh,
- vdev->vmdq_rx_q);
- }
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") in destroy_device: Start put mbuf in "
- "mempool back to ring for RX queue: %d\n",
- dev->device_fh, vdev->vmdq_rx_q);
-
- mbuf_destroy_zcp(vpool);
-
- /* Stop the TX queue. */
- if (rte_eth_dev_tx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In destroy_device: Failed to "
- "stop tx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
- }
-
- vpool = &vpool_array[vdev->vmdq_rx_q + MAX_QUEUES];
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") destroy_device: Start put mbuf in mempool "
- "back to ring for TX queue: %d, dev:(%"PRIu64")\n",
- dev->device_fh, (vdev->vmdq_rx_q + MAX_QUEUES),
- dev->device_fh);
-
- mbuf_destroy_zcp(vpool);
- rte_free(vdev->regions_hpa);
- }
rte_free(vdev);
-
-}
-
-/*
- * Calculate the region count of physical continous regions for one particular
- * region of whose vhost virtual address is continous. The particular region
- * start from vva_start, with size of 'size' in argument.
- */
-static uint32_t
-check_hpa_regions(uint64_t vva_start, uint64_t size)
-{
- uint32_t i, nregions = 0, page_size = getpagesize();
- uint64_t cur_phys_addr = 0, next_phys_addr = 0;
- if (vva_start % page_size) {
- LOG_DEBUG(VHOST_CONFIG,
- "in check_countinous: vva start(%p) mod page_size(%d) "
- "has remainder\n",
- (void *)(uintptr_t)vva_start, page_size);
- return 0;
- }
- if (size % page_size) {
- LOG_DEBUG(VHOST_CONFIG,
- "in check_countinous: "
- "size((%"PRIu64")) mod page_size(%d) has remainder\n",
- size, page_size);
- return 0;
- }
- for (i = 0; i < size - page_size; i = i + page_size) {
- cur_phys_addr
- = rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
- next_phys_addr = rte_mem_virt2phy(
- (void *)(uintptr_t)(vva_start + i + page_size));
- if ((cur_phys_addr + page_size) != next_phys_addr) {
- ++nregions;
- LOG_DEBUG(VHOST_CONFIG,
- "in check_continuous: hva addr:(%p) is not "
- "continuous with hva addr:(%p), diff:%d\n",
- (void *)(uintptr_t)(vva_start + (uint64_t)i),
- (void *)(uintptr_t)(vva_start + (uint64_t)i
- + page_size), page_size);
- LOG_DEBUG(VHOST_CONFIG,
- "in check_continuous: hpa addr:(%p) is not "
- "continuous with hpa addr:(%p), "
- "diff:(%"PRIu64")\n",
- (void *)(uintptr_t)cur_phys_addr,
- (void *)(uintptr_t)next_phys_addr,
- (next_phys_addr-cur_phys_addr));
- }
- }
- return nregions;
-}
-
-/*
- * Divide each region whose vhost virtual address is continous into a few
- * sub-regions, make sure the physical address within each sub-region are
- * continous. And fill offset(to GPA) and size etc. information of each
- * sub-region into regions_hpa.
- */
-static uint32_t
-fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct virtio_memory *virtio_memory)
-{
- uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = getpagesize();
- uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
-
- if (mem_region_hpa == NULL)
- return 0;
-
- for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
- vva_start = virtio_memory->regions[regionidx].guest_phys_address +
- virtio_memory->regions[regionidx].address_offset;
- mem_region_hpa[regionidx_hpa].guest_phys_address
- = virtio_memory->regions[regionidx].guest_phys_address;
- mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
- rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) -
- mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
- for (i = 0, k = 0;
- i < virtio_memory->regions[regionidx].memory_size -
- page_size;
- i += page_size) {
- cur_phys_addr = rte_mem_virt2phy(
- (void *)(uintptr_t)(vva_start + i));
- next_phys_addr = rte_mem_virt2phy(
- (void *)(uintptr_t)(vva_start +
- i + page_size));
- if ((cur_phys_addr + page_size) != next_phys_addr) {
- mem_region_hpa[regionidx_hpa].guest_phys_address_end =
- mem_region_hpa[regionidx_hpa].guest_phys_address +
- k + page_size;
- mem_region_hpa[regionidx_hpa].memory_size
- = k + page_size;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
- "phys addr end [%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: guest phys addr "
- "size [%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].memory_size));
- mem_region_hpa[regionidx_hpa + 1].guest_phys_address
- = mem_region_hpa[regionidx_hpa].guest_phys_address_end;
- ++regionidx_hpa;
- mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
- next_phys_addr -
- mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
- " phys addr start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: host phys addr "
- "start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
- k = 0;
- } else {
- k += page_size;
- }
- }
- mem_region_hpa[regionidx_hpa].guest_phys_address_end
- = mem_region_hpa[regionidx_hpa].guest_phys_address
- + k + page_size;
- mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
- "[%d]:(%p)\n", regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
- "[%d]:(%p)\n", regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].memory_size));
- ++regionidx_hpa;
- }
- return regionidx_hpa;
}
/*
@@ -2654,148 +1224,23 @@ fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct
 * and then allocated to a specific data core.
*/
static int
-new_device (struct virtio_net *dev)
+new_device(int vid)
{
- struct virtio_net_data_ll *ll_dev;
int lcore, core_add = 0;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
- uint32_t regionidx;
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
- dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) couldn't allocate memory for vhost dev\n",
+ vid);
return -1;
}
- vdev->dev = dev;
- dev->priv = vdev;
-
- if (zero_copy) {
- vdev->nregions_hpa = dev->mem->nregions;
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- vdev->nregions_hpa
- += check_hpa_regions(
- dev->mem->regions[regionidx].guest_phys_address
- + dev->mem->regions[regionidx].address_offset,
- dev->mem->regions[regionidx].memory_size);
+ vdev->vid = vid;
- }
-
- vdev->regions_hpa = rte_calloc("vhost hpa region",
- vdev->nregions_hpa,
- sizeof(struct virtio_memory_regions_hpa),
- RTE_CACHE_LINE_SIZE);
- if (vdev->regions_hpa == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n");
- rte_free(vdev);
- return -1;
- }
-
-
- if (fill_hpa_memory_regions(
- vdev->regions_hpa, dev->mem
- ) != vdev->nregions_hpa) {
-
- RTE_LOG(ERR, VHOST_CONFIG,
- "hpa memory regions number mismatch: "
- "[%d]\n", vdev->nregions_hpa);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
- }
-
-
- /* Add device to main ll */
- ll_dev = get_data_ll_free_entry(&ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
- "of %d devices per core has been reached\n",
- dev->device_fh, num_devices);
- if (vdev->regions_hpa)
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
- ll_dev->vdev = vdev;
- add_data_ll_entry(&ll_root_used, ll_dev);
- vdev->vmdq_rx_q
- = dev->device_fh * queues_per_pool + vmdq_queue_base;
-
- if (zero_copy) {
- uint32_t index = vdev->vmdq_rx_q;
- uint32_t count_in_ring, i;
- struct mbuf_table *tx_q;
-
- count_in_ring = rte_ring_count(vpool_array[index].ring);
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") in new_device: mbuf count in mempool "
- "before attach is: %d\n",
- dev->device_fh,
- rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") in new_device: mbuf count in ring "
- "before attach is : %d\n",
- dev->device_fh, count_in_ring);
-
- /*
- * Attach all mbufs in vpool.ring and put back intovpool.pool.
- */
- for (i = 0; i < count_in_ring; i++)
- attach_rxmbuf_zcp(dev);
-
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
- "mempool after attach is: %d\n",
- dev->device_fh,
- rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
- "ring after attach is : %d\n",
- dev->device_fh,
- rte_ring_count(vpool_array[index].ring));
-
- tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
- tx_q->txq_id = vdev->vmdq_rx_q;
-
- if (rte_eth_dev_tx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
- struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In new_device: Failed to start "
- "tx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
-
- mbuf_destroy_zcp(vpool);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
-
- if (rte_eth_dev_rx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
- struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In new_device: Failed to start "
- "rx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
-
- /* Stop the TX queue. */
- if (rte_eth_dev_tx_queue_stop(ports[0],
- vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In new_device: Failed to "
- "stop tx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
- }
-
- mbuf_destroy_zcp(vpool);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
-
- }
+ TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
+ vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
/*reset ready flag*/
vdev->ready = DEVICE_MAC_LEARNING;
@@ -2803,36 +1248,24 @@ new_device (struct virtio_net *dev)
/* Find a suitable lcore to add the device. */
RTE_LCORE_FOREACH_SLAVE(lcore) {
- if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
- device_num_min = lcore_info[lcore].lcore_ll->device_num;
+ if (lcore_info[lcore].device_num < device_num_min) {
+ device_num_min = lcore_info[lcore].device_num;
core_add = lcore;
}
}
- /* Add device to lcore ll */
- ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
- vdev->ready = DEVICE_SAFE_REMOVE;
- destroy_device(dev);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
- ll_dev->vdev = vdev;
vdev->coreid = core_add;
- add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_dev);
-
- /* Initialize device stats */
- memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
+ TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
+ lcore_vdev_entry);
+ lcore_info[vdev->coreid].device_num++;
/* Disable notifications. */
- rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
- rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
- lcore_info[vdev->coreid].lcore_ll->device_num++;
- dev->flags |= VIRTIO_DEV_RUNNING;
+ rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, vdev->coreid);
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) device has been added to data core %d\n",
+ vid, vdev->coreid);
return 0;
}
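
The reworked new_device() above replaces the old free/used linked lists with two sys/queue.h tailqs (a global device list plus a per-lcore list) and assigns each new device to the worker core currently serving the fewest devices. A minimal standalone sketch of that least-loaded placement pattern; the names (worker, NB_WORKERS, place_device) are illustrative and not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Illustrative stand-ins for the per-lcore bookkeeping in the example. */
struct dev {
	int id;
	TAILQ_ENTRY(dev) entries;
};
TAILQ_HEAD(dev_list, dev);

#define NB_WORKERS 4

struct worker {
	uint32_t device_num;        /* how many devices this core serves */
	struct dev_list vdev_list;  /* devices assigned to this core     */
} workers[NB_WORKERS];

/* Assign a new device to the least-loaded worker, as new_device() does. */
static void
place_device(struct dev *d)
{
	uint32_t i, best = 0;

	for (i = 1; i < NB_WORKERS; i++)
		if (workers[i].device_num < workers[best].device_num)
			best = i;

	TAILQ_INSERT_TAIL(&workers[best].vdev_list, d, entries);
	workers[best].device_num++;
	printf("device %d placed on worker %u\n", d->id, best);
}

int
main(void)
{
	int i;

	for (i = 0; i < NB_WORKERS; i++)
		TAILQ_INIT(&workers[i].vdev_list);

	for (i = 0; i < 6; i++) {
		struct dev *d = calloc(1, sizeof(*d));
		d->id = i;
		place_device(d);
	}
	return 0;
}
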
@@ -2854,10 +1287,9 @@ static const struct virtio_net_device_ops virtio_net_device_ops =
static void
print_stats(void)
{
- struct virtio_net_data_ll *dev_ll;
+ struct vhost_dev *vdev;
uint64_t tx_dropped, rx_dropped;
uint64_t tx, tx_total, rx, rx_total;
- uint32_t device_fh;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
@@ -2865,77 +1297,32 @@ print_stats(void)
sleep(enable_stats);
/* Clear screen and move to top left */
- printf("%s%s", clr, top_left);
-
- printf("\nDevice statistics ====================================");
+ printf("%s%s\n", clr, top_left);
+ printf("Device statistics =================================\n");
- dev_ll = ll_root_used;
- while (dev_ll != NULL) {
- device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
- tx_total = dev_statistics[device_fh].tx_total;
- tx = dev_statistics[device_fh].tx;
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ tx_total = vdev->stats.tx_total;
+ tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- if (zero_copy == 0) {
- rx_total = rte_atomic64_read(
- &dev_statistics[device_fh].rx_total_atomic);
- rx = rte_atomic64_read(
- &dev_statistics[device_fh].rx_atomic);
- } else {
- rx_total = dev_statistics[device_fh].rx_total;
- rx = dev_statistics[device_fh].rx;
- }
+
+ rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
+ rx = rte_atomic64_read(&vdev->stats.rx_atomic);
rx_dropped = rx_total - rx;
- printf("\nStatistics for device %"PRIu32" ------------------------------"
- "\nTX total: %"PRIu64""
- "\nTX dropped: %"PRIu64""
- "\nTX successful: %"PRIu64""
- "\nRX total: %"PRIu64""
- "\nRX dropped: %"PRIu64""
- "\nRX successful: %"PRIu64"",
- device_fh,
- tx_total,
- tx_dropped,
- tx,
- rx_total,
- rx_dropped,
- rx);
-
- dev_ll = dev_ll->next;
+ printf("Statistics for device %d\n"
+ "-----------------------\n"
+ "TX total: %" PRIu64 "\n"
+ "TX dropped: %" PRIu64 "\n"
+ "TX successful: %" PRIu64 "\n"
+ "RX total: %" PRIu64 "\n"
+ "RX dropped: %" PRIu64 "\n"
+ "RX successful: %" PRIu64 "\n",
+ vdev->vid,
+ tx_total, tx_dropped, tx,
+ rx_total, rx_dropped, rx);
}
- printf("\n======================================================\n");
- }
-}
-static void
-setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
- char *ring_name, uint32_t nb_mbuf)
-{
- vpool_array[index].pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
- MBUF_CACHE_SIZE_ZCP, 0, MBUF_DATA_SIZE_ZCP, socket);
- if (vpool_array[index].pool != NULL) {
- vpool_array[index].ring
- = rte_ring_create(ring_name,
- rte_align32pow2(nb_mbuf + 1),
- socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
- if (likely(vpool_array[index].ring != NULL)) {
- LOG_DEBUG(VHOST_CONFIG,
- "in setup_mempool_tbl: mbuf count in "
- "mempool is: %d\n",
- rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG,
- "in setup_mempool_tbl: mbuf count in "
- "ring is: %d\n",
- rte_ring_count(vpool_array[index].ring));
- } else {
- rte_exit(EXIT_FAILURE, "ring_create(%s) failed",
- ring_name);
- }
-
- /* Need consider head room. */
- vpool_array[index].buf_size = VIRTIO_DESCRIPTOR_LEN_ZCP;
- } else {
- rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name);
+ printf("===================================================\n");
}
}
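
print_stats() above clears the terminal with raw ANSI escape sequences (ESC [2J clears the screen, ESC [1;1H homes the cursor) and then reprints the per-device counters every interval. A small hedged sketch of that refresh loop, independent of the vhost structures:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* Same escape sequences the example builds as char arrays:    */
	/* ESC[2J clears the screen, ESC[1;1H moves the cursor to 1,1. */
	const char clr[]      = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
	int tick;

	for (tick = 0; tick < 5; tick++) {
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");
		printf("tick: %d\n", tick);
		fflush(stdout);
		sleep(1);
	}
	return 0;
}
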
@@ -2951,20 +1338,70 @@ sigint_handler(__rte_unused int signum)
}
/*
+ * When creating an mbuf pool, the key question is how many mbuf
+ * entries are enough for our use. Some guidelines:
+ *
+ * - Each RX queue reserves @nr_rx_desc mbufs at queue setup time.
+ *
+ * - For each switch core (a CPU core that does the packet switching),
+ *   we also need to reserve some mbufs for receiving packets from the
+ *   virtio Tx queue. How many are enough depends on the usage; it is
+ *   normally a simple calculation like the following:
+ *
+ *       MAX_PKT_BURST * max packet size / mbuf size
+ *
+ *   So we definitely need to allocate more mbufs when TSO is enabled.
+ *
+ * - Similarly, each switching core should reserve @nr_rx_desc mbufs
+ *   for receiving packets from the physical NIC.
+ *
+ * - We also need to make sure that, for each switch core, we have
+ *   allocated enough mbufs to fill up the mbuf cache.
+ */
+static void
+create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
+ uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
+{
+ uint32_t nr_mbufs;
+ uint32_t nr_mbufs_per_core;
+ uint32_t mtu = 1500;
+
+ if (mergeable)
+ mtu = 9000;
+ if (enable_tso)
+ mtu = 64 * 1024;
+
+ nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
+ (mbuf_size - RTE_PKTMBUF_HEADROOM);
+ nr_mbufs_per_core += nr_rx_desc;
+ nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
+
+ nr_mbufs = nr_queues * nr_rx_desc;
+ nr_mbufs += nr_mbufs_per_core * nr_switch_core;
+ nr_mbufs *= nr_port;
+
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
+ nr_mbuf_cache, 0, mbuf_size,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+}
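
To make the sizing comment above concrete, here is a small worked example of the same arithmetic outside the patch. The constants (32-packet bursts, a roughly 2 KB mbuf, 128-byte headroom, two ports, three switching cores, 128 queues and RX descriptors, a 128-mbuf cache) are illustrative assumptions, not values taken from the example's headers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the real example takes these from its headers. */
#define PKT_BURST      32u    /* stand-in for MAX_PKT_BURST        */
#define MBUF_SIZE      2176u  /* stand-in for MBUF_DATA_SIZE       */
#define MBUF_HEADROOM  128u   /* stand-in for RTE_PKTMBUF_HEADROOM */

static uint32_t
nr_mbufs_needed(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mtu,
		uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	/* mbufs a switching core may need to drain one virtio Tx burst */
	uint32_t per_core = (mtu + MBUF_SIZE) * PKT_BURST /
			    (MBUF_SIZE - MBUF_HEADROOM);

	per_core += nr_rx_desc;               /* refill for the NIC RX queue */
	if (per_core < nr_mbuf_cache)         /* enough to fill the cache    */
		per_core = nr_mbuf_cache;

	return (nr_queues * nr_rx_desc + per_core * nr_switch_core) * nr_port;
}

int
main(void)
{
	/* 1500-byte MTU vs. 64 KB TSO frames, 2 ports, 3 switching cores */
	printf("plain: %" PRIu32 " mbufs\n",
	       nr_mbufs_needed(2, 3, 1500, 128, 128, 128));
	printf("TSO:   %" PRIu32 " mbufs\n",
	       nr_mbufs_needed(2, 3, 64 * 1024, 128, 128, 128));
	return 0;
}
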
+
+/*
* Main function, does initialisation and calls the per-lcore functions. The CUSE
* device is also registered here to handle the IOCTLs.
*/
int
main(int argc, char *argv[])
{
- struct rte_mempool *mbuf_pool = NULL;
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_num_ports;
int ret;
uint8_t portid;
- uint16_t queue_id;
static pthread_t tid;
char thread_name[RTE_MAX_THREAD_NAME_LEN];
+ uint64_t flags = 0;
signal(SIGINT, sigint_handler);
@@ -2981,19 +1418,16 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Invalid argument\n");
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
+ TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
+
if (rte_lcore_is_enabled(lcore_id))
lcore_ids[core_id ++] = lcore_id;
if (rte_lcore_count() > RTE_MAX_LCORE)
rte_exit(EXIT_FAILURE,"Not enough cores\n");
- /*set the number of swithcing cores available*/
- num_switching_cores = rte_lcore_count()-1;
-
/* Get the number of physical ports. */
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS
@@ -3007,64 +1441,21 @@ main(int argc, char *argv[])
return -1;
}
- if (zero_copy == 0) {
- /* Create the mbuf pool. */
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
- NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE,
- 0, MBUF_DATA_SIZE, rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-
- for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
- vpool_array[queue_id].pool = mbuf_pool;
-
- if (vm2vm_mode == VM2VM_HARDWARE) {
- /* Enable VT loop back to let L2 switch to do it. */
- vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(VHOST_CONFIG,
- "Enable loop back for L2 switch in vmdq.\n");
- }
- } else {
- uint32_t nb_mbuf;
- char pool_name[RTE_MEMPOOL_NAMESIZE];
- char ring_name[RTE_MEMPOOL_NAMESIZE];
-
- nb_mbuf = num_rx_descriptor
- + num_switching_cores * MBUF_CACHE_SIZE_ZCP
- + num_switching_cores * MAX_PKT_BURST;
-
- for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
- snprintf(pool_name, sizeof(pool_name),
- "rxmbuf_pool_%u", queue_id);
- snprintf(ring_name, sizeof(ring_name),
- "rxmbuf_ring_%u", queue_id);
- setup_mempool_tbl(rte_socket_id(), queue_id,
- pool_name, ring_name, nb_mbuf);
- }
-
- nb_mbuf = num_tx_descriptor
- + num_switching_cores * MBUF_CACHE_SIZE_ZCP
- + num_switching_cores * MAX_PKT_BURST;
-
- for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
- snprintf(pool_name, sizeof(pool_name),
- "txmbuf_pool_%u", queue_id);
- snprintf(ring_name, sizeof(ring_name),
- "txmbuf_ring_%u", queue_id);
- setup_mempool_tbl(rte_socket_id(),
- (queue_id + MAX_QUEUES),
- pool_name, ring_name, nb_mbuf);
- }
+ /*
+ * FIXME: here we allocate mbufs for @MAX_QUEUES, but in practice we
+ * never use that many queues. We should probably allocate only for
+ * the queues we are actually going to use.
+ */
+ create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
+ MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
- if (vm2vm_mode == VM2VM_HARDWARE) {
- /* Enable VT loop back to let L2 switch to do it. */
- vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(VHOST_CONFIG,
- "Enable loop back for L2 switch in vmdq.\n");
- }
+ if (vm2vm_mode == VM2VM_HARDWARE) {
+ /* Enable VT loop back to let L2 switch to do it. */
+ vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "Enable loop back for L2 switch in vmdq.\n");
}
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
@@ -3079,13 +1470,6 @@ main(int argc, char *argv[])
"Cannot initialize network ports\n");
}
- /* Initialise all linked lists. */
- if (init_data_ll() == -1)
- rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
-
- /* Initialize device stats */
- memset(&dev_statistics, 0, sizeof(dev_statistics));
-
/* Enable stats if the user option is set. */
if (enable_stats) {
ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
@@ -3097,54 +1481,22 @@ main(int argc, char *argv[])
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
ret = rte_thread_setname(tid, thread_name);
if (ret != 0)
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"Cannot set print-stats name\n");
}
/* Launch all data cores. */
- if (zero_copy == 0) {
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- rte_eal_remote_launch(switch_worker,
- mbuf_pool, lcore_id);
- }
- } else {
- uint32_t count_in_mempool, index, i;
- for (index = 0; index < 2*MAX_QUEUES; index++) {
- /* For all RX and TX queues. */
- count_in_mempool
- = rte_mempool_count(vpool_array[index].pool);
-
- /*
- * Transfer all un-attached mbufs from vpool.pool
- * to vpoo.ring.
- */
- for (i = 0; i < count_in_mempool; i++) {
- struct rte_mbuf *mbuf
- = __rte_mbuf_raw_alloc(
- vpool_array[index].pool);
- rte_ring_sp_enqueue(vpool_array[index].ring,
- (void *)mbuf);
- }
-
- LOG_DEBUG(VHOST_CONFIG,
- "in main: mbuf count in mempool at initial "
- "is: %d\n", count_in_mempool);
- LOG_DEBUG(VHOST_CONFIG,
- "in main: mbuf count in ring at initial is :"
- " %d\n",
- rte_ring_count(vpool_array[index].ring));
- }
-
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
- rte_eal_remote_launch(switch_worker_zcp, NULL,
- lcore_id);
- }
+ RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ rte_eal_remote_launch(switch_worker, NULL, lcore_id);
if (mergeable == 0)
rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
+ if (client_mode)
+ flags |= RTE_VHOST_USER_CLIENT;
+
/* Register vhost(cuse or user) driver to handle vhost messages. */
- ret = rte_vhost_driver_register((char *)&dev_basename);
+ ret = rte_vhost_driver_register(dev_basename, flags);
if (ret != 0)
rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index d04e2be2..6bb42e89 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,48 +34,23 @@
#ifndef _MAIN_H_
#define _MAIN_H_
-//#define DEBUG
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) do { \
- RTE_LOG(DEBUG, log_type, fmt, ##args); \
-} while (0)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)
-#endif
+#include <sys/queue.h>
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
-/**
- * Information relating to memory regions including offsets to
- * addresses in host physical space.
- */
-struct virtio_memory_regions_hpa {
- /**< Base guest physical address of region. */
- uint64_t guest_phys_address;
- /**< End guest physical address of region. */
- uint64_t guest_phys_address_end;
- /**< Size of region. */
- uint64_t memory_size;
- /**< Offset of region for gpa to hpa translation. */
- uint64_t host_phys_addr_offset;
+struct device_statistics {
+ uint64_t tx;
+ uint64_t tx_total;
+ rte_atomic64_t rx_atomic;
+ rte_atomic64_t rx_total_atomic;
};
-/*
- * Device linked list structure for data path.
- */
struct vhost_dev {
- /**< Pointer to device created by vhost lib. */
- struct virtio_net *dev;
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
- /**< Memory region information for gpa to hpa translation. */
- struct virtio_memory_regions_hpa *regions_hpa;
/**< Device MAC address (Obtained on first TX packet). */
struct ether_addr mac_address;
/**< RX VMDQ queue number. */
@@ -88,28 +63,29 @@ struct vhost_dev {
volatile uint8_t ready;
/**< Device is marked for removal from the data core. */
volatile uint8_t remove;
+
+ int vid;
+ struct device_statistics stats;
+ TAILQ_ENTRY(vhost_dev) global_vdev_entry;
+ TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;
} __rte_cache_aligned;
-struct virtio_net_data_ll
-{
- struct vhost_dev *vdev; /* Pointer to device created by configuration core. */
- struct virtio_net_data_ll *next; /* Pointer to next device in linked list. */
-};
+TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
+
+
+#define REQUEST_DEV_REMOVAL 1
+#define ACK_DEV_REMOVAL 0
/*
* Structure containing data core specific information.
*/
-struct lcore_ll_info
-{
- struct virtio_net_data_ll *ll_root_free; /* Pointer to head in free linked list. */
- struct virtio_net_data_ll *ll_root_used; /* Pointer to head of used linked list. */
- uint32_t device_num; /* Number of devices on lcore. */
- volatile uint8_t dev_removal_flag; /* Flag to synchronize device removal. */
-};
+struct lcore_info {
+ uint32_t device_num;
+
+ /* Flag to synchronize device removal. */
+ volatile uint8_t dev_removal_flag;
-struct lcore_info
-{
- struct lcore_ll_info *lcore_ll; /* Pointer to data core specific lcore_ll_info struct */
+ struct vhost_dev_tailq_list vdev_list;
};
#endif /* _MAIN_H_ */
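
The main.h rework above drops the hand-rolled virtio_net_data_ll free/used lists in favour of the BSD sys/queue.h tailq macros: TAILQ_ENTRY fields embed the links inside struct vhost_dev itself (one for the global list, one for the per-lcore list) and TAILQ_HEAD declares the list-head type. A minimal sketch of that embedding pattern, using hypothetical names rather than the patch's structs:

#include <stdio.h>
#include <sys/queue.h>

/* An element can sit on several lists at once: one TAILQ_ENTRY per list. */
struct item {
	int value;
	TAILQ_ENTRY(item) global_entry;  /* link for the global list   */
	TAILQ_ENTRY(item) percore_entry; /* link for a per-core list   */
};

TAILQ_HEAD(item_list, item);             /* declares "struct item_list" */

int
main(void)
{
	struct item_list global = TAILQ_HEAD_INITIALIZER(global);
	struct item_list core0  = TAILQ_HEAD_INITIALIZER(core0);
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *it;

	TAILQ_INSERT_TAIL(&global, &a, global_entry);
	TAILQ_INSERT_TAIL(&global, &b, global_entry);
	TAILQ_INSERT_TAIL(&core0, &a, percore_entry);

	TAILQ_FOREACH(it, &global, global_entry)
		printf("global item %d\n", it->value);
	TAILQ_FOREACH(it, &core0, percore_entry)
		printf("core0 item %d\n", it->value);
	return 0;
}
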
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index d83138d6..2e403576 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -507,32 +507,6 @@ static unsigned check_ports_num(unsigned nb_ports)
}
/*
- * Macro to print out packet contents. Wrapped in debug define so that the
- * data path is not effected when debug is disabled.
- */
-#ifdef DEBUG
-#define PRINT_PACKET(device, addr, size, header) do { \
- char *pkt_addr = (char*)(addr); \
- unsigned int index; \
- char packet[MAX_PRINT_BUFF]; \
- \
- if ((header)) \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
- else \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
- for (index = 0; index < (size); index++) { \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
- "%02hhx ", pkt_addr[index]); \
- } \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
- \
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
-} while(0)
-#else
-#define PRINT_PACKET(device, addr, size, header) do{} while(0)
-#endif
-
-/*
* Function to convert guest physical addresses to vhost virtual addresses. This
* is used to convert virtio buffer addresses.
*/
@@ -551,7 +525,7 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
break;
}
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
return vhost_va;
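
The gpa_to_vva() hunk above shows the usual region-walk translation: find the memory region whose guest-physical range contains the address and add that region's offset to obtain a host virtual address. A hedged sketch of the idea, with a hypothetical region layout rather than the DPDK structures:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical region layout; field names are illustrative only. */
struct mem_region {
	uint64_t guest_phys_addr;
	uint64_t size;
	uint64_t host_virt_offset;   /* add to a GPA to get a host VA */
};

static uint64_t
gpa_to_vva(const struct mem_region *regions, int nregions, uint64_t gpa)
{
	int i;

	for (i = 0; i < nregions; i++) {
		const struct mem_region *r = &regions[i];

		/* Region found: translate by adding its offset. */
		if (gpa >= r->guest_phys_addr &&
		    gpa < r->guest_phys_addr + r->size)
			return gpa + r->host_virt_offset;
	}
	return 0;   /* address not backed by any region */
}

int
main(void)
{
	struct mem_region regions[] = {
		{ 0x0,        0x100000, 0x7f0000000000ULL },
		{ 0x40000000, 0x200000, 0x7f0010000000ULL },
	};

	printf("GPA 0x40001000 -> VVA 0x%" PRIx64 "\n",
	       gpa_to_vva(regions, 2, 0x40001000ULL));
	return 0;
}
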
@@ -581,7 +555,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
uint8_t success = 0;
void *userdata;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
vq = dev->virtqueue_rx;
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/* As many data cores may want access to available buffers, they need to be reserved. */
@@ -606,7 +580,8 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
+ dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
@@ -800,17 +775,22 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
- dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "Source and destination MAC addresses are the same. "
+ "Dropping packet.\n",
+ dev_ll->dev->device_fh);
return 0;
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is local\n", dev_ll->dev->device_fh);
if (dev_ll->dev->remove) {
/*drop the packet if the device is marked for removal*/
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
+ "Device is marked for removal\n",
+ dev_ll->dev->device_fh);
} else {
/*send the packet to the local virtio device*/
ret = virtio_dev_rx(dev_ll->dev, &m, 1);
@@ -849,7 +829,8 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
return;
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
@@ -922,7 +903,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
if (vq->last_used_idx == avail_idx)
return;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
+ dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
@@ -931,7 +913,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
free_entries = avail_idx - vq->last_used_idx;
free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
+ dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
@@ -1020,7 +1003,9 @@ switch_worker(__attribute__((unused)) void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "TX queue drained after timeout with burst size %u\n",
+ tx_q->len);
/*Tx any packets in the queue*/
ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
@@ -1460,8 +1445,6 @@ main(int argc, char *argv[])
/* Get the number of physical ports. */
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS
@@ -1482,9 +1465,6 @@ main(int argc, char *argv[])
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
-
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
@@ -1514,7 +1494,7 @@ main(int argc, char *argv[])
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-xen-stats");
ret = rte_thread_setname(tid, thread_name);
if (ret != 0)
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"Cannot set print-stats name\n");
}
diff --git a/examples/vhost_xen/main.h b/examples/vhost_xen/main.h
index 481572e6..5ff48fd9 100644
--- a/examples/vhost_xen/main.h
+++ b/examples/vhost_xen/main.h
@@ -34,17 +34,6 @@
#ifndef _MAIN_H_
#define _MAIN_H_
-//#define DEBUG
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) \
- RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 22c2ddd5..e068ae28 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -667,6 +667,7 @@ add_vm(const char *vm_name)
return -1;
}
strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
+ new_domain->name[sizeof(new_domain->name) - 1] = '\0';
new_domain->channel_mask = 0;
new_domain->num_channels = 0;
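
The one-line channel_manager.c change above addresses a classic strncpy pitfall: when the source string is at least as long as the destination buffer, strncpy leaves the buffer without a terminating NUL. A small illustration of why the explicit terminator matters (the buffer size here is arbitrary):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char name[8];
	const char *vm_name = "a-rather-long-vm-name";

	/* strncpy fills the buffer but does NOT NUL-terminate on truncation. */
	strncpy(name, vm_name, sizeof(name));
	name[sizeof(name) - 1] = '\0';   /* the fix applied in the patch */

	printf("stored name: %s\n", name);
	return 0;
}
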
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 178af2f5..360492ba 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -599,8 +599,6 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Not enough cores\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 62e1422a..617263b4 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -662,8 +662,6 @@ main(int argc, char *argv[])
}
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS