author    Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 14:51:32 +0200
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 16:20:45 +0200
commit    7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree      4bfeadc905c977e45e54a90c42330553b8942e4e /examples
parent    ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)
Imported Upstream version 17.05
Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'examples')
-rw-r--r--  examples/Makefile | 7
-rw-r--r--  examples/bond/Makefile | 4
-rw-r--r--  examples/bond/main.c | 2
-rw-r--r--  examples/distributor/main.c | 402
-rw-r--r--  examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf | 65
-rw-r--r--  examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf | 293
-rw-r--r--  examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf | 292
-rw-r--r--  examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf | 235
-rw-r--r--  examples/dpdk_qat/crypto.c | 943
-rw-r--r--  examples/dpdk_qat/main.c | 821
-rw-r--r--  examples/ethtool/Makefile | 3
-rw-r--r--  examples/ethtool/ethtool-app/Makefile | 5
-rw-r--r--  examples/ethtool/ethtool-app/ethapp.c | 2
-rw-r--r--  examples/ethtool/ethtool-app/main.c | 1
-rw-r--r--  examples/ethtool/lib/Makefile | 8
-rw-r--r--  examples/ethtool/lib/rte_ethtool.c | 21
-rw-r--r--  examples/exception_path/Makefile | 1
-rw-r--r--  examples/exception_path/main.c | 2
-rw-r--r--  examples/ip_fragmentation/main.c | 103
-rw-r--r--  examples/ip_pipeline/Makefile | 2
-rwxr-xr-x  examples/ip_pipeline/config/diagram-generator.py | 13
-rwxr-xr-x  examples/ip_pipeline/config/pipeline-to-core-mapping.py | 11
-rw-r--r--  examples/ip_pipeline/config_parse.c | 6
-rw-r--r--  examples/ip_pipeline/init.c | 27
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_firewall_be.c | 6
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_routing.c | 20
-rw-r--r--  examples/ip_reassembly/main.c | 27
-rw-r--r--  examples/ipsec-secgw/esp.c | 4
-rw-r--r--  examples/ipsec-secgw/ipsec-secgw.c | 6
-rw-r--r--  examples/ipsec-secgw/ipsec.c | 17
-rw-r--r--  examples/ipsec-secgw/ipsec.h | 2
-rw-r--r--  examples/ipsec-secgw/sa.c | 6
-rw-r--r--  examples/ipv4_multicast/main.c | 12
-rw-r--r--  examples/kni/main.c | 2
-rw-r--r--  examples/l2fwd-cat/cat.c | 2
-rw-r--r--  examples/l2fwd-crypto/main.c | 180
-rw-r--r--  examples/l2fwd-jobstats/main.c | 4
-rw-r--r--  examples/l2fwd-keepalive/main.c | 35
-rw-r--r--  examples/l2fwd-keepalive/shm.c | 10
-rw-r--r--  examples/l2fwd-keepalive/shm.h | 9
-rw-r--r--  examples/l2fwd/main.c | 34
-rw-r--r--  examples/l3fwd-acl/main.c | 4
-rw-r--r--  examples/l3fwd-power/main.c | 131
-rw-r--r--  examples/l3fwd-vf/main.c | 4
-rw-r--r--  examples/l3fwd/l3fwd_lpm.h | 2
-rw-r--r--  examples/l3fwd/l3fwd_lpm_sse.h | 24
-rw-r--r--  examples/l3fwd/main.c | 173
-rw-r--r--  examples/link_status_interrupt/main.c | 4
-rw-r--r--  examples/load_balancer/config.c | 2
-rw-r--r--  examples/load_balancer/init.c | 2
-rw-r--r--  examples/load_balancer/runtime.c | 38
-rw-r--r--  examples/multi_process/client_server_mp/mp_client/client.c | 9
-rw-r--r--  examples/multi_process/client_server_mp/mp_server/main.c | 2
-rw-r--r--  examples/multi_process/l2fwd_fork/main.c | 19
-rw-r--r--  examples/multi_process/symmetric_mp/main.c | 4
-rw-r--r--  examples/netmap_compat/bridge/Makefile | 1
-rw-r--r--  examples/netmap_compat/bridge/bridge.c | 2
-rw-r--r--  examples/packet_ordering/main.c | 21
-rw-r--r--  examples/performance-thread/common/arch/x86/ctx.h | 8
-rw-r--r--  examples/performance-thread/common/common.mk | 20
-rw-r--r--  examples/performance-thread/common/lthread.h | 8
-rw-r--r--  examples/performance-thread/common/lthread_api.h | 8
-rw-r--r--  examples/performance-thread/common/lthread_cond.h | 8
-rw-r--r--  examples/performance-thread/common/lthread_diag.h | 9
-rw-r--r--  examples/performance-thread/common/lthread_diag_api.h | 8
-rw-r--r--  examples/performance-thread/common/lthread_int.h | 8
-rw-r--r--  examples/performance-thread/common/lthread_mutex.h | 8
-rw-r--r--  examples/performance-thread/common/lthread_objcache.h | 7
-rw-r--r--  examples/performance-thread/common/lthread_pool.h | 7
-rw-r--r--  examples/performance-thread/common/lthread_queue.h | 7
-rw-r--r--  examples/performance-thread/common/lthread_sched.h | 7
-rw-r--r--  examples/performance-thread/common/lthread_timer.h | 24
-rw-r--r--  examples/performance-thread/common/lthread_tls.c | 1
-rw-r--r--  examples/performance-thread/common/lthread_tls.h | 7
-rw-r--r--  examples/performance-thread/l3fwd-thread/main.c | 127
-rw-r--r--  examples/performance-thread/pthread_shim/main.c | 8
-rw-r--r--  examples/performance-thread/pthread_shim/pthread_shim.c | 38
-rw-r--r--  examples/performance-thread/pthread_shim/pthread_shim.h | 3
-rw-r--r--  examples/ptpclient/ptpclient.c | 2
-rw-r--r--  examples/qos_meter/main.c | 4
-rw-r--r--  examples/qos_sched/Makefile | 1
-rw-r--r--  examples/qos_sched/app_thread.c | 14
-rw-r--r--  examples/qos_sched/init.c | 2
-rw-r--r--  examples/quota_watermark/qw/args.c | 63
-rw-r--r--  examples/quota_watermark/qw/init.c | 178
-rw-r--r--  examples/quota_watermark/qw/main.c | 382
-rw-r--r--  examples/quota_watermark/qw/main.h | 7
-rw-r--r--  examples/quota_watermark/qwctl/commands.c | 208
-rw-r--r--  examples/quota_watermark/qwctl/qwctl.c | 40
-rw-r--r--  examples/quota_watermark/qwctl/qwctl.h | 1
-rw-r--r--  examples/server_node_efd/Makefile | 44
-rw-r--r--  examples/server_node_efd/node/Makefile | 48
-rw-r--r--  examples/server_node_efd/node/node.c | 417
-rw-r--r--  examples/server_node_efd/server/Makefile (renamed from examples/dpdk_qat/Makefile) | 52
-rw-r--r--  examples/server_node_efd/server/args.c | 200
-rw-r--r--  examples/server_node_efd/server/args.h | 39
-rw-r--r--  examples/server_node_efd/server/init.c | 371
-rw-r--r--  examples/server_node_efd/server/init.h (renamed from examples/dpdk_qat/crypto.h) | 86
-rw-r--r--  examples/server_node_efd/server/main.c | 362
-rw-r--r--  examples/server_node_efd/shared/common.h | 99
-rw-r--r--  examples/tep_termination/main.c | 43
-rw-r--r--  examples/tep_termination/main.h | 2
-rw-r--r--  examples/tep_termination/vxlan_setup.c | 4
-rw-r--r--  examples/vhost/Makefile | 2
-rw-r--r--  examples/vhost/main.c | 118
-rw-r--r--  examples/vhost/main.h | 32
-rw-r--r--  examples/vhost/virtio_net.c | 403
-rw-r--r--  examples/vhost_xen/main.c | 22
108 files changed, 3943 insertions(+), 3711 deletions(-)
diff --git a/examples/Makefile b/examples/Makefile
index d49c7f29..6298626b 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -40,11 +40,9 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bond
DIRS-y += cmdline
DIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += distributor
-ifneq ($(ICP_ROOT),)
-DIRS-y += dpdk_qat
-endif
DIRS-y += ethtool
DIRS-y += exception_path
+DIRS-$(CONFIG_RTE_LIBRTE_EFD) += server_node_efd
DIRS-y += helloworld
DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += ip_pipeline
ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
@@ -77,6 +75,9 @@ DIRS-$(CONFIG_RTE_LIBRTE_LPM) += load_balancer
DIRS-y += multi_process
DIRS-y += netmap_compat/bridge
DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+DIRS-y += performance-thread
+endif
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
diff --git a/examples/bond/Makefile b/examples/bond/Makefile
index 626d79d9..ae4cb6e1 100644
--- a/examples/bond/Makefile
+++ b/examples/bond/Makefile
@@ -54,4 +54,8 @@ endif
CFLAGS += -O3
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+LDLIBS += -lrte_pmd_bond
+endif
+
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 6402c6b3..9a4ec807 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -160,7 +160,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
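
The change above flips the bond example's default to hardware CRC stripping. As a minimal sketch (assuming the DPDK 17.05-era struct rte_eth_conf layout; the name example_port_conf is illustrative), a port configuration with this default looks like:

    #include <rte_ethdev.h>

    /* Sketch: an RX configuration with the NIC stripping the Ethernet CRC,
     * matching the new default in the bond example above. */
    static const struct rte_eth_conf example_port_conf = {
        .rxmode = {
            .mq_mode = ETH_MQ_RX_RSS,
            .hw_strip_crc = 1, /* CRC stripped by hardware */
        },
    };
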
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 537cee1f..8071f919 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -1,8 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,33 +44,65 @@
#include <rte_prefetch.h>
#include <rte_distributor.h>
-#define RX_RING_SIZE 256
+#define RX_RING_SIZE 512
#define TX_RING_SIZE 512
#define NUM_MBUFS ((64*1024)-1)
-#define MBUF_CACHE_SIZE 250
-#define BURST_SIZE 32
-#define RTE_RING_SZ 1024
+#define MBUF_CACHE_SIZE 128
+#define BURST_SIZE 64
+#define SCHED_RX_RING_SZ 8192
+#define SCHED_TX_RING_SZ 65536
+#define BURST_SIZE_TX 32
#define RTE_LOGTYPE_DISTRAPP RTE_LOGTYPE_USER1
+#define ANSI_COLOR_RED "\x1b[31m"
+#define ANSI_COLOR_RESET "\x1b[0m"
+
/* mask of enabled ports */
static uint32_t enabled_port_mask;
volatile uint8_t quit_signal;
volatile uint8_t quit_signal_rx;
+volatile uint8_t quit_signal_dist;
+volatile uint8_t quit_signal_work;
static volatile struct app_stats {
struct {
uint64_t rx_pkts;
uint64_t returned_pkts;
uint64_t enqueued_pkts;
+ uint64_t enqdrop_pkts;
} rx __rte_cache_aligned;
+ int pad1 __rte_cache_aligned;
+
+ struct {
+ uint64_t in_pkts;
+ uint64_t ret_pkts;
+ uint64_t sent_pkts;
+ uint64_t enqdrop_pkts;
+ } dist __rte_cache_aligned;
+ int pad2 __rte_cache_aligned;
struct {
uint64_t dequeue_pkts;
uint64_t tx_pkts;
+ uint64_t enqdrop_pkts;
} tx __rte_cache_aligned;
+ int pad3 __rte_cache_aligned;
+
+ uint64_t worker_pkts[64] __rte_cache_aligned;
+
+ int pad4 __rte_cache_aligned;
+
+ uint64_t worker_bursts[64][8] __rte_cache_aligned;
+
+ int pad5 __rte_cache_aligned;
+
+ uint64_t port_rx_pkts[64] __rte_cache_aligned;
+ uint64_t port_tx_pkts[64] __rte_cache_aligned;
} app_stats;
+struct app_stats prev_app_stats;
+
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
@@ -93,6 +124,8 @@ struct output_buffer {
struct rte_mbuf *mbufs[BURST_SIZE];
};
+static void print_stats(void);
+
/*
* Initialises a given port using global settings and with the rx buffers
* coming from the mbuf_pool passed as parameter
@@ -134,7 +167,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_link link;
rte_eth_link_get_nowait(port, &link);
- if (!link.link_status) {
+ while (!link.link_status) {
+ printf("Waiting for Link up on port %"PRIu8"\n", port);
sleep(1);
rte_eth_link_get_nowait(port, &link);
}
@@ -161,40 +195,18 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct lcore_params {
unsigned worker_id;
struct rte_distributor *d;
- struct rte_ring *r;
+ struct rte_ring *rx_dist_ring;
+ struct rte_ring *dist_tx_ring;
struct rte_mempool *mem_pool;
};
static int
-quit_workers(struct rte_distributor *d, struct rte_mempool *p)
-{
- const unsigned num_workers = rte_lcore_count() - 2;
- unsigned i;
- struct rte_mbuf *bufs[num_workers];
-
- if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
- printf("line %d: Error getting mbufs from pool\n", __LINE__);
- return -1;
- }
-
- for (i = 0; i < num_workers; i++)
- bufs[i]->hash.rss = i << 1;
-
- rte_distributor_process(d, bufs, num_workers);
- rte_mempool_put_bulk(p, (void *)bufs, num_workers);
-
- return 0;
-}
-
-static int
lcore_rx(struct lcore_params *p)
{
- struct rte_distributor *d = p->d;
- struct rte_mempool *mem_pool = p->mem_pool;
- struct rte_ring *r = p->r;
const uint8_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
uint8_t port;
+ struct rte_mbuf *bufs[BURST_SIZE*2];
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
@@ -218,7 +230,6 @@ lcore_rx(struct lcore_params *p)
port = 0;
continue;
}
- struct rte_mbuf *bufs[BURST_SIZE*2];
const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs,
BURST_SIZE);
if (unlikely(nb_rx == 0)) {
@@ -228,9 +239,15 @@ lcore_rx(struct lcore_params *p)
}
app_stats.rx.rx_pkts += nb_rx;
- rte_distributor_process(d, bufs, nb_rx);
- const uint16_t nb_ret = rte_distributor_returned_pkts(d,
- bufs, BURST_SIZE*2);
+/*
+ * You can run the distributor on the rx core with this code. Returned
+ * packets are then sent straight to the tx core.
+ */
+#if 0
+ rte_distributor_process(d, bufs, nb_rx);
+ const uint16_t nb_ret = rte_distributor_returned_pkts(d,
+ bufs, BURST_SIZE*2);
+
app_stats.rx.returned_pkts += nb_ret;
if (unlikely(nb_ret == 0)) {
if (++port == nb_ports)
@@ -238,10 +255,26 @@ lcore_rx(struct lcore_params *p)
continue;
}
- uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
+ struct rte_ring *tx_ring = p->dist_tx_ring;
+ uint16_t sent = rte_ring_enqueue_burst(tx_ring,
+ (void *)bufs, nb_ret, NULL);
+#else
+ uint16_t nb_ret = nb_rx;
+ /*
+ * Swap the following two lines if you want the rx traffic
+ * to go directly to tx, no distribution.
+ */
+ struct rte_ring *out_ring = p->rx_dist_ring;
+ /* struct rte_ring *out_ring = p->dist_tx_ring; */
+
+ uint16_t sent = rte_ring_enqueue_burst(out_ring,
+ (void *)bufs, nb_ret, NULL);
+#endif
+
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
- RTE_LOG(DEBUG, DISTRAPP,
+ app_stats.rx.enqdrop_pkts += nb_ret - sent;
+ RTE_LOG_DP(DEBUG, DISTRAPP,
"%s:Packet loss due to full ring\n", __func__);
while (sent < nb_ret)
rte_pktmbuf_free(bufs[sent++]);
@@ -249,33 +282,21 @@ lcore_rx(struct lcore_params *p)
if (++port == nb_ports)
port = 0;
}
- rte_distributor_process(d, NULL, 0);
- /* flush distributor to bring to known state */
- rte_distributor_flush(d);
/* set worker & tx threads quit flag */
+ printf("\nCore %u exiting rx task.\n", rte_lcore_id());
quit_signal = 1;
- /*
- * worker threads may hang in get packet as
- * distributor process is not running, just make sure workers
- * get packets till quit_signal is actually been
- * received and they gracefully shutdown
- */
- if (quit_workers(d, mem_pool) != 0)
- return -1;
- /* rx thread should quit at last */
return 0;
}
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
- unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
- outbuf->count);
- app_stats.tx.tx_pkts += nb_tx;
+ unsigned int nb_tx = rte_eth_tx_burst(outp, 0,
+ outbuf->mbufs, outbuf->count);
+ app_stats.tx.tx_pkts += outbuf->count;
if (unlikely(nb_tx < outbuf->count)) {
- RTE_LOG(DEBUG, DISTRAPP,
- "%s:Packet loss with tx_burst\n", __func__);
+ app_stats.tx.enqdrop_pkts += outbuf->count - nb_tx;
do {
rte_pktmbuf_free(outbuf->mbufs[nb_tx]);
} while (++nb_tx < outbuf->count);
@@ -287,6 +308,7 @@ static inline void
flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
{
uint8_t outp;
+
for (outp = 0; outp < nb_ports; outp++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << outp)) == 0)
@@ -299,6 +321,58 @@ flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
}
}
+
+
+static int
+lcore_distributor(struct lcore_params *p)
+{
+ struct rte_ring *in_r = p->rx_dist_ring;
+ struct rte_ring *out_r = p->dist_tx_ring;
+ struct rte_mbuf *bufs[BURST_SIZE * 4];
+ struct rte_distributor *d = p->d;
+
+ printf("\nCore %u acting as distributor core.\n", rte_lcore_id());
+ while (!quit_signal_dist) {
+ const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
+ (void *)bufs, BURST_SIZE*1, NULL);
+ if (nb_rx) {
+ app_stats.dist.in_pkts += nb_rx;
+
+ /* Distribute the packets */
+ rte_distributor_process(d, bufs, nb_rx);
+ /* Handle Returns */
+ const uint16_t nb_ret =
+ rte_distributor_returned_pkts(d,
+ bufs, BURST_SIZE*2);
+
+ if (unlikely(nb_ret == 0))
+ continue;
+ app_stats.dist.ret_pkts += nb_ret;
+
+ uint16_t sent = rte_ring_enqueue_burst(out_r,
+ (void *)bufs, nb_ret, NULL);
+ app_stats.dist.sent_pkts += sent;
+ if (unlikely(sent < nb_ret)) {
+ app_stats.dist.enqdrop_pkts += nb_ret - sent;
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss due to full out ring\n",
+ __func__);
+ while (sent < nb_ret)
+ rte_pktmbuf_free(bufs[sent++]);
+ }
+ }
+ }
+ printf("\nCore %u exiting distributor task.\n", rte_lcore_id());
+ quit_signal_work = 1;
+
+ rte_distributor_flush(d);
+ /* Unblock any returns so workers can exit */
+ rte_distributor_clear_returns(d);
+ quit_signal_rx = 1;
+ return 0;
+}
+
+
static int
lcore_tx(struct rte_ring *in_r)
{
@@ -327,9 +401,9 @@ lcore_tx(struct rte_ring *in_r)
if ((enabled_port_mask & (1 << port)) == 0)
continue;
- struct rte_mbuf *bufs[BURST_SIZE];
+ struct rte_mbuf *bufs[BURST_SIZE_TX];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE);
+ (void *)bufs, BURST_SIZE_TX, NULL);
app_stats.tx.dequeue_pkts += nb_rx;
/* if we get no traffic, flush anything we have */
@@ -358,11 +432,12 @@ lcore_tx(struct rte_ring *in_r)
outbuf = &tx_buffers[outp];
outbuf->mbufs[outbuf->count++] = bufs[i];
- if (outbuf->count == BURST_SIZE)
+ if (outbuf->count == BURST_SIZE_TX)
flush_one_port(outbuf, outp);
}
}
}
+ printf("\nCore %u exiting tx task.\n", rte_lcore_id());
return 0;
}
@@ -371,32 +446,98 @@ int_handler(int sig_num)
{
printf("Exiting on signal %d\n", sig_num);
/* set quit flag for rx thread to exit */
- quit_signal_rx = 1;
+ quit_signal_dist = 1;
}
static void
print_stats(void)
{
struct rte_eth_stats eth_stats;
- unsigned i;
-
- printf("\nRX thread stats:\n");
- printf(" - Received: %"PRIu64"\n", app_stats.rx.rx_pkts);
- printf(" - Processed: %"PRIu64"\n", app_stats.rx.returned_pkts);
- printf(" - Enqueued: %"PRIu64"\n", app_stats.rx.enqueued_pkts);
-
- printf("\nTX thread stats:\n");
- printf(" - Dequeued: %"PRIu64"\n", app_stats.tx.dequeue_pkts);
- printf(" - Transmitted: %"PRIu64"\n", app_stats.tx.tx_pkts);
+ unsigned int i, j;
+ const unsigned int num_workers = rte_lcore_count() - 4;
for (i = 0; i < rte_eth_dev_count(); i++) {
rte_eth_stats_get(i, &eth_stats);
- printf("\nPort %u stats:\n", i);
- printf(" - Pkts in: %"PRIu64"\n", eth_stats.ipackets);
- printf(" - Pkts out: %"PRIu64"\n", eth_stats.opackets);
- printf(" - In Errs: %"PRIu64"\n", eth_stats.ierrors);
- printf(" - Out Errs: %"PRIu64"\n", eth_stats.oerrors);
- printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
+ app_stats.port_rx_pkts[i] = eth_stats.ipackets;
+ app_stats.port_tx_pkts[i] = eth_stats.opackets;
+ }
+
+ printf("\n\nRX Thread:\n");
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ printf("Port %u Pktsin : %5.2f\n", i,
+ (app_stats.port_rx_pkts[i] -
+ prev_app_stats.port_rx_pkts[i])/1000000.0);
+ prev_app_stats.port_rx_pkts[i] = app_stats.port_rx_pkts[i];
+ }
+ printf(" - Received: %5.2f\n",
+ (app_stats.rx.rx_pkts -
+ prev_app_stats.rx.rx_pkts)/1000000.0);
+ printf(" - Returned: %5.2f\n",
+ (app_stats.rx.returned_pkts -
+ prev_app_stats.rx.returned_pkts)/1000000.0);
+ printf(" - Enqueued: %5.2f\n",
+ (app_stats.rx.enqueued_pkts -
+ prev_app_stats.rx.enqueued_pkts)/1000000.0);
+ printf(" - Dropped: %s%5.2f%s\n", ANSI_COLOR_RED,
+ (app_stats.rx.enqdrop_pkts -
+ prev_app_stats.rx.enqdrop_pkts)/1000000.0,
+ ANSI_COLOR_RESET);
+
+ printf("Distributor thread:\n");
+ printf(" - In: %5.2f\n",
+ (app_stats.dist.in_pkts -
+ prev_app_stats.dist.in_pkts)/1000000.0);
+ printf(" - Returned: %5.2f\n",
+ (app_stats.dist.ret_pkts -
+ prev_app_stats.dist.ret_pkts)/1000000.0);
+ printf(" - Sent: %5.2f\n",
+ (app_stats.dist.sent_pkts -
+ prev_app_stats.dist.sent_pkts)/1000000.0);
+ printf(" - Dropped %s%5.2f%s\n", ANSI_COLOR_RED,
+ (app_stats.dist.enqdrop_pkts -
+ prev_app_stats.dist.enqdrop_pkts)/1000000.0,
+ ANSI_COLOR_RESET);
+
+ printf("TX thread:\n");
+ printf(" - Dequeued: %5.2f\n",
+ (app_stats.tx.dequeue_pkts -
+ prev_app_stats.tx.dequeue_pkts)/1000000.0);
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ printf("Port %u Pktsout: %5.2f\n",
+ i, (app_stats.port_tx_pkts[i] -
+ prev_app_stats.port_tx_pkts[i])/1000000.0);
+ prev_app_stats.port_tx_pkts[i] = app_stats.port_tx_pkts[i];
+ }
+ printf(" - Transmitted: %5.2f\n",
+ (app_stats.tx.tx_pkts -
+ prev_app_stats.tx.tx_pkts)/1000000.0);
+ printf(" - Dropped: %s%5.2f%s\n", ANSI_COLOR_RED,
+ (app_stats.tx.enqdrop_pkts -
+ prev_app_stats.tx.enqdrop_pkts)/1000000.0,
+ ANSI_COLOR_RESET);
+
+ prev_app_stats.rx.rx_pkts = app_stats.rx.rx_pkts;
+ prev_app_stats.rx.returned_pkts = app_stats.rx.returned_pkts;
+ prev_app_stats.rx.enqueued_pkts = app_stats.rx.enqueued_pkts;
+ prev_app_stats.rx.enqdrop_pkts = app_stats.rx.enqdrop_pkts;
+ prev_app_stats.dist.in_pkts = app_stats.dist.in_pkts;
+ prev_app_stats.dist.ret_pkts = app_stats.dist.ret_pkts;
+ prev_app_stats.dist.sent_pkts = app_stats.dist.sent_pkts;
+ prev_app_stats.dist.enqdrop_pkts = app_stats.dist.enqdrop_pkts;
+ prev_app_stats.tx.dequeue_pkts = app_stats.tx.dequeue_pkts;
+ prev_app_stats.tx.tx_pkts = app_stats.tx.tx_pkts;
+ prev_app_stats.tx.enqdrop_pkts = app_stats.tx.enqdrop_pkts;
+
+ for (i = 0; i < num_workers; i++) {
+ printf("Worker %02u Pkts: %5.2f. Bursts(1-8): ", i,
+ (app_stats.worker_pkts[i] -
+ prev_app_stats.worker_pkts[i])/1000000.0);
+ for (j = 0; j < 8; j++) {
+ printf("%"PRIu64" ", app_stats.worker_bursts[i][j]);
+ app_stats.worker_bursts[i][j] = 0;
+ }
+ printf("\n");
+ prev_app_stats.worker_pkts[i] = app_stats.worker_pkts[i];
}
}
@@ -405,17 +546,36 @@ lcore_worker(struct lcore_params *p)
{
struct rte_distributor *d = p->d;
const unsigned id = p->worker_id;
+ unsigned int num = 0;
+ unsigned int i;
+
/*
* for single port, xor_val will be zero so we won't modify the output
* port, otherwise we send traffic from 0 to 1, 2 to 3, and vice versa
*/
const unsigned xor_val = (rte_eth_dev_count() > 1);
- struct rte_mbuf *buf = NULL;
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+
+ for (i = 0; i < 8; i++)
+ buf[i] = NULL;
+
+ app_stats.worker_pkts[p->worker_id] = 1;
printf("\nCore %u acting as worker core.\n", rte_lcore_id());
- while (!quit_signal) {
- buf = rte_distributor_get_pkt(d, id, buf);
- buf->port ^= xor_val;
+ while (!quit_signal_work) {
+ num = rte_distributor_get_pkt(d, id, buf, buf, num);
+ /* Do a little bit of work for each packet */
+ for (i = 0; i < num; i++) {
+ uint64_t t = rte_rdtsc()+100;
+
+ while (rte_rdtsc() < t)
+ rte_pause();
+ buf[i]->port ^= xor_val;
+ }
+
+ app_stats.worker_pkts[p->worker_id] += num;
+ if (num > 0)
+ app_stats.worker_bursts[p->worker_id][num-1]++;
}
return 0;
}
@@ -487,7 +647,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return 0;
}
@@ -497,11 +657,13 @@ main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
struct rte_distributor *d;
- struct rte_ring *output_ring;
+ struct rte_ring *dist_tx_ring;
+ struct rte_ring *rx_dist_ring;
unsigned lcore_id, worker_id = 0;
unsigned nb_ports;
uint8_t portid;
uint8_t nb_ports_available;
+ uint64_t t, freq;
/* catch ctrl-c so we can print on exit */
signal(SIGINT, int_handler);
@@ -518,10 +680,12 @@ main(int argc, char *argv[])
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid distributor parameters\n");
- if (rte_lcore_count() < 3)
+ if (rte_lcore_count() < 5)
rte_exit(EXIT_FAILURE, "Error, This application needs at "
- "least 3 logical cores to run:\n"
- "1 lcore for packet RX and distribution\n"
+ "least 5 logical cores to run:\n"
+ "1 lcore for stats (can be core 0)\n"
+ "1 lcore for packet RX\n"
+ "1 lcore for distribution\n"
"1 lcore for packet TX\n"
"and at least 1 lcore for worker threads\n");
@@ -561,40 +725,82 @@ main(int argc, char *argv[])
}
d = rte_distributor_create("PKT_DIST", rte_socket_id(),
- rte_lcore_count() - 2);
+ rte_lcore_count() - 4,
+ RTE_DIST_ALG_BURST);
if (d == NULL)
rte_exit(EXIT_FAILURE, "Cannot create distributor\n");
/*
- * scheduler ring is read only by the transmitter core, but written to
- * by multiple threads
+ * scheduler ring is read by the transmitter core, and written to
+ * by scheduler core
*/
- output_ring = rte_ring_create("Output_ring", RTE_RING_SZ,
- rte_socket_id(), RING_F_SC_DEQ);
- if (output_ring == NULL)
+ dist_tx_ring = rte_ring_create("Output_ring", SCHED_TX_RING_SZ,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (dist_tx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create output ring\n");
+
+ rx_dist_ring = rte_ring_create("Input_ring", SCHED_RX_RING_SZ,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (rx_dist_ring == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create input ring\n");
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (worker_id == rte_lcore_count() - 2)
+ if (worker_id == rte_lcore_count() - 3) {
+ printf("Starting distributor on lcore_id %d\n",
+ lcore_id);
+ /* distributor core */
+ struct lcore_params *p =
+ rte_malloc(NULL, sizeof(*p), 0);
+ if (!p)
+ rte_panic("malloc failure\n");
+ *p = (struct lcore_params){worker_id, d,
+ rx_dist_ring, dist_tx_ring, mbuf_pool};
+ rte_eal_remote_launch(
+ (lcore_function_t *)lcore_distributor,
+ p, lcore_id);
+ } else if (worker_id == rte_lcore_count() - 4) {
+ printf("Starting tx on worker_id %d, lcore_id %d\n",
+ worker_id, lcore_id);
+ /* tx core */
rte_eal_remote_launch((lcore_function_t *)lcore_tx,
- output_ring, lcore_id);
- else {
+ dist_tx_ring, lcore_id);
+ } else if (worker_id == rte_lcore_count() - 2) {
+ printf("Starting rx on worker_id %d, lcore_id %d\n",
+ worker_id, lcore_id);
+ /* rx core */
struct lcore_params *p =
rte_malloc(NULL, sizeof(*p), 0);
if (!p)
rte_panic("malloc failure\n");
- *p = (struct lcore_params){worker_id, d, output_ring, mbuf_pool};
+ *p = (struct lcore_params){worker_id, d, rx_dist_ring,
+ dist_tx_ring, mbuf_pool};
+ rte_eal_remote_launch((lcore_function_t *)lcore_rx,
+ p, lcore_id);
+ } else {
+ printf("Starting worker on worker_id %d, lcore_id %d\n",
+ worker_id, lcore_id);
+ struct lcore_params *p =
+ rte_malloc(NULL, sizeof(*p), 0);
+ if (!p)
+ rte_panic("malloc failure\n");
+ *p = (struct lcore_params){worker_id, d, rx_dist_ring,
+ dist_tx_ring, mbuf_pool};
rte_eal_remote_launch((lcore_function_t *)lcore_worker,
p, lcore_id);
}
worker_id++;
}
- /* call lcore_main on master core only */
- struct lcore_params p = { 0, d, output_ring, mbuf_pool};
- if (lcore_rx(&p) != 0)
- return -1;
+ freq = rte_get_timer_hz();
+ t = rte_rdtsc() + freq;
+ while (!quit_signal_dist) {
+ if (t < rte_rdtsc()) {
+ print_stats();
+ t = rte_rdtsc() + freq;
+ }
+ usleep(1000);
+ }
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
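
The distributor rework above moves the example onto the 17.05 burst API: rte_distributor_create() gains an algorithm argument (RTE_DIST_ALG_BURST) and rte_distributor_get_pkt() exchanges arrays of up to eight mbufs rather than one packet at a time. A minimal worker loop built on the calls shown in the diff might look like the following sketch (the quit flag and function name are illustrative):

    #include <rte_distributor.h>
    #include <rte_mbuf.h>

    static volatile int quit;

    static int
    worker_loop(struct rte_distributor *d, unsigned int worker_id)
    {
        struct rte_mbuf *bufs[8] __rte_cache_aligned;
        unsigned int i, num = 0;

        while (!quit) {
            /* Hand back the previous burst, receive up to 8 new mbufs. */
            num = rte_distributor_get_pkt(d, worker_id, bufs, bufs, num);
            for (i = 0; i < num; i++)
                bufs[i]->port ^= 1; /* e.g. swap traffic between ports 0 and 1 */
        }
        return 0;
    }
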
diff --git a/examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf b/examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf
deleted file mode 100644
index fd139e2f..00000000
--- a/examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-[GENERAL]
-ServicesEnabled = cy;dc
-ConfigVersion = 2
-cyHmacAuthMode = 1
-dcTotalSRAMAvailable = 0
-Firmware_MofPath = dh895xcc/mof_firmware.bin
-Firmware_MmpPath = dh895xcc/mmp_firmware.bin
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-SRIOV_Enabled = 0
-ProcDebug = 1
-
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-[SSL]
-NumberCyInstances = 8
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0CoreAffinity = 0
-
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1CoreAffinity = 1
-
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2CoreAffinity = 2
-
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3CoreAffinity = 3
-
-
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4CoreAffinity = 4
-
-
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5CoreAffinity = 5
-
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6CoreAffinity = 6
-
-
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7CoreAffinity = 7
diff --git a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf
deleted file mode 100644
index 9e1c1d11..00000000
--- a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf
+++ /dev/null
@@ -1,293 +0,0 @@
-#########################################################################
-#
-# @par
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# #########################################################################
-# ########################################################
-#
-# This file is the configuration for a single dh89xxcc_qa
-# device.
-#
-# Each device has up to two accelerators.
-# - The client may load balance between these
-# accelerators.
-# Each accelerator has 8 independent ring banks.
-# - The interrupt for each can be directed to a
-# specific core.
-# Each ring bank as 16 rings (hardware assisted queues).
-#
-#########################################################
-# General Section
-##############################################
-
-[GENERAL]
-ServicesEnabled = cy0;cy1
-
-# Use version 2 of the config file
-ConfigVersion = 2
-# Look Aside Cryptographic Configuration
-cyHmacAuthMode = 1
-
-# Look Aside Compression Configuration
-dcTotalSRAMAvailable = 0
-
-# Firmware Location Configuration
-Firmware_MofPath = mof_firmware.bin
-Firmware_MmpPath = mmp_firmware.bin
-
-#Default values for number of concurrent requests*/
-CyNumConcurrentSymRequests = 512
-CyNumConcurrentAsymRequests = 64
-DcNumConcurrentRequests = 512
-
-#Statistics, valid values: 1,0
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-
-# Enables or disables Single Root Complex IO Virtualization.
-# If this is enabled (1) then SRIOV and VT-d need to be enabled in
-# BIOS and there can be no Cy or Dc instances created in PF (Dom0).
-# If this i disabled (0) then SRIOV and VT-d need to be disabled
-# in BIOS and Cy and/or Dc instances can be used in PF (Dom0)
-SRIOV_Enabled = 0
-
-#Debug feature, if set to 1 it enables additional entries in /proc filesystem
-ProcDebug = 1
-
-#######################################################
-#
-# Logical Instances Section
-# A logical instance allows each address domain
-# (kernel space and individual user space processes)
-# to configure rings (i.e. hardware assisted queues)
-# to be used by that address domain and to define the
-# behavior of that ring.
-#
-# The address domains are in the following format
-# - For kernel address domains
-# [KERNEL]
-# - For user process address domains
-# [xxxxx]
-# Where xxxxx may be any ascii value which uniquely identifies
-# the user mode process.
-# To allow the driver correctly configure the
-# logical instances associated with this user process,
-# the process must call the icp_sal_userStartMultiProcess(...)
-# passing the xxxxx string during process initialisation.
-# When the user space process is finished it must call
-# icp_sal_userStop(...) to free resources.
-# NumProcesses will indicate the maximum number of processes
-# that can call icp_sal_userStartMultiProcess on this instance.
-# Warning: the resources are preallocated: if NumProcesses
-# is too high, the driver will fail to load
-#
-# Items configurable by a logical instance are:
-# - Name of the logical instance
-# - The accelerator associated with this logical
-# instance
-# - The core the instance is affinitized to (optional)
-#
-# Note: Logical instances may not share the same ring, but
-# may share a ring bank.
-#
-# The format of the logical instances are:
-# - For crypto:
-# Cy<n>Name = "xxxx"
-# Cy<n>AcceleratorNumber = 0-3
-# Cy<n>CoreAffinity = 0-7
-#
-# - For Data Compression
-# Dc<n>Name = "xxxx"
-# Dc<n>AcceleratorNumber = 0-1
-# Dc<n>CoreAffinity = 0-7
-#
-# Where:
-# - n is the number of this logical instance starting at 0.
-# - xxxx may be any ascii value which identifies the logical instance.
-#
-# Note: for user space processes, a list of values can be specified for
-# the accelerator number and the core affinity: for example
-# Cy0AcceleratorNumber = 0,2
-# Cy0CoreAffinity = 0,2,4
-# These comma-separated lists will allow the multiple processes to use
-# different accelerators and cores, and will wrap around the numbers
-# in the list. In the above example, process 0 will use accelerator 0,
-# and process 1 will use accelerator 2
-#
-########################################################
-
-##############################################
-# Kernel Instances Section
-##############################################
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-##############################################
-# User Process Instance Section
-##############################################
-[SSL]
-NumberCyInstances = 16
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-# Crypto - User instance #0
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0AcceleratorNumber = 0
-# List of core affinities
-Cy0CoreAffinity = 0
-
-# Crypto - User instance #1
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1AcceleratorNumber = 1
-# List of core affinities
-Cy1CoreAffinity = 1
-
-# Crypto - User instance #2
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2AcceleratorNumber = 2
-# List of core affinities
-Cy2CoreAffinity = 2
-
-# Crypto - User instance #3
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3AcceleratorNumber = 3
-# List of core affinities
-Cy3CoreAffinity = 3
-
-# Crypto - User instance #4
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4AcceleratorNumber = 0
-# List of core affinities
-Cy4CoreAffinity = 4
-
-# Crypto - User instance #5
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5AcceleratorNumber = 1
-# List of core affinities
-Cy5CoreAffinity = 5
-
-# Crypto - User instance #6
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6AcceleratorNumber = 2
-# List of core affinities
-Cy6CoreAffinity = 6
-
-# Crypto - User instance #7
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7AcceleratorNumber = 3
-# List of core affinities
-Cy7CoreAffinity = 7
-
-# Crypto - User instance #8
-Cy8Name = "SSL8"
-Cy8IsPolled = 1
-Cy8AcceleratorNumber = 0
-# List of core affinities
-Cy8CoreAffinity = 16
-
-# Crypto - User instance #9
-Cy9Name = "SSL9"
-Cy9IsPolled = 1
-Cy9AcceleratorNumber = 1
-# List of core affinities
-Cy9CoreAffinity = 17
-
-# Crypto - User instance #10
-Cy10Name = "SSL10"
-Cy10IsPolled = 1
-Cy10AcceleratorNumber = 2
-# List of core affinities
-Cy10CoreAffinity = 18
-
-# Crypto - User instance #11
-Cy11Name = "SSL11"
-Cy11IsPolled = 1
-Cy11AcceleratorNumber = 3
-# List of core affinities
-Cy11CoreAffinity = 19
-
-# Crypto - User instance #12
-Cy12Name = "SSL12"
-Cy12IsPolled = 1
-Cy12AcceleratorNumber = 0
-# List of core affinities
-Cy12CoreAffinity = 20
-
-# Crypto - User instance #13
-Cy13Name = "SSL13"
-Cy13IsPolled = 1
-Cy13AcceleratorNumber = 1
-# List of core affinities
-Cy13CoreAffinity = 21
-
-# Crypto - User instance #14
-Cy14Name = "SSL14"
-Cy14IsPolled = 1
-Cy14AcceleratorNumber = 2
-# List of core affinities
-Cy14CoreAffinity = 22
-
-# Crypto - User instance #15
-Cy15Name = "SSL15"
-Cy15IsPolled = 1
-Cy15AcceleratorNumber = 3
-# List of core affinities
-Cy15CoreAffinity = 23
-
-
-
-##############################################
-# Wireless Process Instance Section
-##############################################
-[WIRELESS]
-NumberCyInstances = 0
-NumberDcInstances = 0
-NumProcesses = 0
diff --git a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf b/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf
deleted file mode 100644
index 3e8d8b6b..00000000
--- a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf
+++ /dev/null
@@ -1,292 +0,0 @@
-#########################################################################
-#
-# @par
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# #########################################################################
-# ########################################################
-#
-# This file is the configuration for a single dh89xxcc_qa
-# device.
-#
-# Each device has up to two accelerators.
-# - The client may load balance between these
-# accelerators.
-# Each accelerator has 8 independent ring banks.
-# - The interrupt for each can be directed to a
-# specific core.
-# Each ring bank as 16 rings (hardware assisted queues).
-#
-#########################################################
-# General Section
-##############################################
-
-[GENERAL]
-ServicesEnabled = cy0;cy1
-
-# Use version 2 of the config file
-ConfigVersion = 2
-# Look Aside Cryptographic Configuration
-cyHmacAuthMode = 1
-
-# Look Aside Compression Configuration
-dcTotalSRAMAvailable = 0
-
-# Firmware Location Configuration
-Firmware_MofPath = mof_firmware.bin
-Firmware_MmpPath = mmp_firmware.bin
-
-#Default values for number of concurrent requests*/
-CyNumConcurrentSymRequests = 512
-CyNumConcurrentAsymRequests = 64
-DcNumConcurrentRequests = 512
-
-#Statistics, valid values: 1,0
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-
-# Enables or disables Single Root Complex IO Virtualization.
-# If this is enabled (1) then SRIOV and VT-d need to be enabled in
-# BIOS and there can be no Cy or Dc instances created in PF (Dom0).
-# If this i disabled (0) then SRIOV and VT-d need to be disabled
-# in BIOS and Cy and/or Dc instances can be used in PF (Dom0)
-SRIOV_Enabled = 0
-
-#Debug feature, if set to 1 it enables additional entries in /proc filesystem
-ProcDebug = 1
-
-#######################################################
-#
-# Logical Instances Section
-# A logical instance allows each address domain
-# (kernel space and individual user space processes)
-# to configure rings (i.e. hardware assisted queues)
-# to be used by that address domain and to define the
-# behavior of that ring.
-#
-# The address domains are in the following format
-# - For kernel address domains
-# [KERNEL]
-# - For user process address domains
-# [xxxxx]
-# Where xxxxx may be any ascii value which uniquely identifies
-# the user mode process.
-# To allow the driver correctly configure the
-# logical instances associated with this user process,
-# the process must call the icp_sal_userStartMultiProcess(...)
-# passing the xxxxx string during process initialisation.
-# When the user space process is finished it must call
-# icp_sal_userStop(...) to free resources.
-# NumProcesses will indicate the maximum number of processes
-# that can call icp_sal_userStartMultiProcess on this instance.
-# Warning: the resources are preallocated: if NumProcesses
-# is too high, the driver will fail to load
-#
-# Items configurable by a logical instance are:
-# - Name of the logical instance
-# - The accelerator associated with this logical
-# instance
-# - The core the instance is affinitized to (optional)
-#
-# Note: Logical instances may not share the same ring, but
-# may share a ring bank.
-#
-# The format of the logical instances are:
-# - For crypto:
-# Cy<n>Name = "xxxx"
-# Cy<n>AcceleratorNumber = 0-3
-# Cy<n>CoreAffinity = 0-7
-#
-# - For Data Compression
-# Dc<n>Name = "xxxx"
-# Dc<n>AcceleratorNumber = 0-1
-# Dc<n>CoreAffinity = 0-7
-#
-# Where:
-# - n is the number of this logical instance starting at 0.
-# - xxxx may be any ascii value which identifies the logical instance.
-#
-# Note: for user space processes, a list of values can be specified for
-# the accelerator number and the core affinity: for example
-# Cy0AcceleratorNumber = 0,2
-# Cy0CoreAffinity = 0,2,4
-# These comma-separated lists will allow the multiple processes to use
-# different accelerators and cores, and will wrap around the numbers
-# in the list. In the above example, process 0 will use accelerator 0,
-# and process 1 will use accelerator 2
-#
-########################################################
-
-##############################################
-# Kernel Instances Section
-##############################################
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-##############################################
-# User Process Instance Section
-##############################################
-[SSL]
-NumberCyInstances = 16
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-# Crypto - User instance #0
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0AcceleratorNumber = 0
-# List of core affinities
-Cy0CoreAffinity = 8
-
-# Crypto - User instance #1
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1AcceleratorNumber = 1
-# List of core affinities
-Cy1CoreAffinity = 9
-
-# Crypto - User instance #2
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2AcceleratorNumber = 2
-# List of core affinities
-Cy2CoreAffinity = 10
-
-# Crypto - User instance #3
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3AcceleratorNumber = 3
-# List of core affinities
-Cy3CoreAffinity = 11
-
-# Crypto - User instance #4
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4AcceleratorNumber = 0
-# List of core affinities
-Cy4CoreAffinity = 12
-
-# Crypto - User instance #5
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5AcceleratorNumber = 1
-# List of core affinities
-Cy5CoreAffinity = 13
-
-# Crypto - User instance #6
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6AcceleratorNumber = 2
-# List of core affinities
-Cy6CoreAffinity = 14
-
-# Crypto - User instance #7
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7AcceleratorNumber = 3
-# List of core affinities
-Cy7CoreAffinity = 15
-
-# Crypto - User instance #8
-Cy8Name = "SSL8"
-Cy8IsPolled = 1
-Cy8AcceleratorNumber = 0
-# List of core affinities
-Cy8CoreAffinity = 24
-
-# Crypto - User instance #9
-Cy9Name = "SSL9"
-Cy9IsPolled = 1
-Cy9AcceleratorNumber = 1
-# List of core affinities
-Cy9CoreAffinity = 25
-
-# Crypto - User instance #10
-Cy10Name = "SSL10"
-Cy10IsPolled = 1
-Cy10AcceleratorNumber = 2
-# List of core affinities
-Cy10CoreAffinity = 26
-
-# Crypto - User instance #11
-Cy11Name = "SSL11"
-Cy11IsPolled = 1
-Cy11AcceleratorNumber = 3
-# List of core affinities
-Cy11CoreAffinity = 27
-
-# Crypto - User instance #12
-Cy12Name = "SSL12"
-Cy12IsPolled = 1
-Cy12AcceleratorNumber = 0
-# List of core affinities
-Cy12CoreAffinity = 28
-
-# Crypto - User instance #13
-Cy13Name = "SSL13"
-Cy13IsPolled = 1
-Cy13AcceleratorNumber = 1
-# List of core affinities
-Cy13CoreAffinity = 29
-
-# Crypto - User instance #14
-Cy14Name = "SSL14"
-Cy14IsPolled = 1
-Cy14AcceleratorNumber = 2
-# List of core affinities
-Cy14CoreAffinity = 30
-
-# Crypto - User instance #15
-Cy15Name = "SSL15"
-Cy15IsPolled = 1
-Cy15AcceleratorNumber = 3
-# List of core affinities
-Cy15CoreAffinity = 31
-
-
-##############################################
-# Wireless Process Instance Section
-##############################################
-[WIRELESS]
-NumberCyInstances = 0
-NumberDcInstances = 0
-NumProcesses = 0
diff --git a/examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf
deleted file mode 100644
index c3a85dea..00000000
--- a/examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf
+++ /dev/null
@@ -1,235 +0,0 @@
-#########################################################################
-#
-# @par
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# #########################################################################
-# ########################################################
-#
-# This file is the configuration for a single dh89xxcc_qa
-# device.
-#
-# Each device has up to two accelerators.
-# - The client may load balance between these
-# accelerators.
-# Each accelerator has 8 independent ring banks.
-# - The interrupt for each can be directed to a
-# specific core.
-# Each ring bank as 16 rings (hardware assisted queues).
-#
-#########################################################
-# General Section
-##############################################
-
-[GENERAL]
-ServicesEnabled = cy0;cy1
-
-# Use version 2 of the config file
-ConfigVersion = 2
-# Look Aside Cryptographic Configuration
-cyHmacAuthMode = 1
-
-# Look Aside Compression Configuration
-dcTotalSRAMAvailable = 0
-
-# Firmware Location Configuration
-Firmware_MofPath = mof_firmware.bin
-Firmware_MmpPath = mmp_firmware.bin
-
-#Default values for number of concurrent requests*/
-CyNumConcurrentSymRequests = 512
-CyNumConcurrentAsymRequests = 64
-DcNumConcurrentRequests = 512
-
-#Statistics, valid values: 1,0
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-
-# Enables or disables Single Root Complex IO Virtualization.
-# If this is enabled (1) then SRIOV and VT-d need to be enabled in
-# BIOS and there can be no Cy or Dc instances created in PF (Dom0).
-# If this i disabled (0) then SRIOV and VT-d need to be disabled
-# in BIOS and Cy and/or Dc instances can be used in PF (Dom0)
-SRIOV_Enabled = 0
-
-#Debug feature, if set to 1 it enables additional entries in /proc filesystem
-ProcDebug = 1
-
-#######################################################
-#
-# Logical Instances Section
-# A logical instance allows each address domain
-# (kernel space and individual user space processes)
-# to configure rings (i.e. hardware assisted queues)
-# to be used by that address domain and to define the
-# behavior of that ring.
-#
-# The address domains are in the following format
-# - For kernel address domains
-# [KERNEL]
-# - For user process address domains
-# [xxxxx]
-# Where xxxxx may be any ascii value which uniquely identifies
-# the user mode process.
-# To allow the driver correctly configure the
-# logical instances associated with this user process,
-# the process must call the icp_sal_userStartMultiProcess(...)
-# passing the xxxxx string during process initialisation.
-# When the user space process is finished it must call
-# icp_sal_userStop(...) to free resources.
-# NumProcesses will indicate the maximum number of processes
-# that can call icp_sal_userStartMultiProcess on this instance.
-# Warning: the resources are preallocated: if NumProcesses
-# is too high, the driver will fail to load
-#
-# Items configurable by a logical instance are:
-# - Name of the logical instance
-# - The accelerator associated with this logical
-# instance
-# - The core the instance is affinitized to (optional)
-#
-# Note: Logical instances may not share the same ring, but
-# may share a ring bank.
-#
-# The format of the logical instances are:
-# - For crypto:
-# Cy<n>Name = "xxxx"
-# Cy<n>AcceleratorNumber = 0-3
-# Cy<n>CoreAffinity = 0-7
-#
-# - For Data Compression
-# Dc<n>Name = "xxxx"
-# Dc<n>AcceleratorNumber = 0-1
-# Dc<n>CoreAffinity = 0-7
-#
-# Where:
-# - n is the number of this logical instance starting at 0.
-# - xxxx may be any ascii value which identifies the logical instance.
-#
-# Note: for user space processes, a list of values can be specified for
-# the accelerator number and the core affinity: for example
-# Cy0AcceleratorNumber = 0,2
-# Cy0CoreAffinity = 0,2,4
-# These comma-separated lists will allow the multiple processes to use
-# different accelerators and cores, and will wrap around the numbers
-# in the list. In the above example, process 0 will use accelerator 0,
-# and process 1 will use accelerator 2
-#
-########################################################
-
-##############################################
-# Kernel Instances Section
-##############################################
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-##############################################
-# User Process Instance Section
-##############################################
-[SSL]
-NumberCyInstances = 8
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-# Crypto - User instance #0
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0AcceleratorNumber = 0
-# List of core affinities
-Cy0CoreAffinity = 0
-
-# Crypto - User instance #1
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1AcceleratorNumber = 1
-# List of core affinities
-Cy1CoreAffinity = 1
-
-# Crypto - User instance #2
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2AcceleratorNumber = 2
-# List of core affinities
-Cy2CoreAffinity = 2
-
-# Crypto - User instance #3
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3AcceleratorNumber = 3
-# List of core affinities
-Cy3CoreAffinity = 3
-
-# Crypto - User instance #4
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4AcceleratorNumber = 0
-# List of core affinities
-Cy4CoreAffinity = 4
-
-# Crypto - User instance #5
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5AcceleratorNumber = 1
-# List of core affinities
-Cy5CoreAffinity = 5
-
-# Crypto - User instance #6
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6AcceleratorNumber = 2
-# List of core affinities
-Cy6CoreAffinity = 6
-
-# Crypto - User instance #7
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7AcceleratorNumber = 3
-# List of core affinities
-Cy7CoreAffinity = 7
-
-##############################################
-# Wireless Process Instance Section
-##############################################
-[WIRELESS]
-NumberCyInstances = 0
-NumberDcInstances = 0
-NumProcesses = 0
diff --git a/examples/dpdk_qat/crypto.c b/examples/dpdk_qat/crypto.c
deleted file mode 100644
index 02032f30..00000000
--- a/examples/dpdk_qat/crypto.c
+++ /dev/null
@@ -1,943 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <strings.h>
-#include <string.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <stdarg.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_ether.h>
-#include <rte_malloc.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_string_fns.h>
-
-#define CPA_CY_SYM_DP_TMP_WORKAROUND 1
-
-#include "cpa.h"
-#include "cpa_types.h"
-#include "cpa_cy_sym_dp.h"
-#include "cpa_cy_common.h"
-#include "cpa_cy_im.h"
-#include "icp_sal_user.h"
-#include "icp_sal_poll.h"
-
-#include "crypto.h"
-
-/* CIPHER KEY LENGTHS */
-#define KEY_SIZE_64_IN_BYTES (64 / 8)
-#define KEY_SIZE_56_IN_BYTES (56 / 8)
-#define KEY_SIZE_128_IN_BYTES (128 / 8)
-#define KEY_SIZE_168_IN_BYTES (168 / 8)
-#define KEY_SIZE_192_IN_BYTES (192 / 8)
-#define KEY_SIZE_256_IN_BYTES (256 / 8)
-
-/* HMAC AUTH KEY LENGTHS */
-#define AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES (128 / 8)
-#define SHA1_AUTH_KEY_LENGTH_IN_BYTES (160 / 8)
-#define SHA224_AUTH_KEY_LENGTH_IN_BYTES (224 / 8)
-#define SHA256_AUTH_KEY_LENGTH_IN_BYTES (256 / 8)
-#define SHA384_AUTH_KEY_LENGTH_IN_BYTES (384 / 8)
-#define SHA512_AUTH_KEY_LENGTH_IN_BYTES (512 / 8)
-#define MD5_AUTH_KEY_LENGTH_IN_BYTES (128 / 8)
-#define KASUMI_AUTH_KEY_LENGTH_IN_BYTES (128 / 8)
-
-/* HASH DIGEST LENGHTS */
-#define AES_XCBC_DIGEST_LENGTH_IN_BYTES (128 / 8)
-#define AES_XCBC_96_DIGEST_LENGTH_IN_BYTES (96 / 8)
-#define MD5_DIGEST_LENGTH_IN_BYTES (128 / 8)
-#define SHA1_DIGEST_LENGTH_IN_BYTES (160 / 8)
-#define SHA1_96_DIGEST_LENGTH_IN_BYTES (96 / 8)
-#define SHA224_DIGEST_LENGTH_IN_BYTES (224 / 8)
-#define SHA256_DIGEST_LENGTH_IN_BYTES (256 / 8)
-#define SHA384_DIGEST_LENGTH_IN_BYTES (384 / 8)
-#define SHA512_DIGEST_LENGTH_IN_BYTES (512 / 8)
-#define KASUMI_DIGEST_LENGTH_IN_BYTES (32 / 8)
-
-#define IV_LENGTH_16_BYTES (16)
-#define IV_LENGTH_8_BYTES (8)
-
-
-/*
- * rte_memzone is used to allocate physically contiguous virtual memory.
- * In this application we allocate a single block and divide it among the
- * variables that require a virtual to physical mapping for use by the QAT
- * driver. Virt2phys is only performed during initialisation and not on the
- * data-path.
- */
-
-#define LCORE_MEMZONE_SIZE (1 << 22)
-
-struct lcore_memzone
-{
- const struct rte_memzone *memzone;
- void *next_free_address;
-};
-
-/*
- * Size of the qa software response queue.
- * Note: head and tail are 8-bit, so the queue is fixed at 256 entries.
- */
-#define CRYPTO_SOFTWARE_QUEUE_SIZE 256
-
-struct qa_callbackQueue {
- uint8_t head;
- uint8_t tail;
- uint16_t numEntries;
- struct rte_mbuf *qaCallbackRing[CRYPTO_SOFTWARE_QUEUE_SIZE];
-};
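Because head and tail are uint8_t, the increments in the enqueue and dequeue paths wrap modulo 256 automatically, which is why the ring must hold exactly 256 entries. A minimal illustration of the idiom (editorial sketch, not part of the original file):

    uint8_t idx = 255;  /* last slot of a 256-entry ring */
    idx++;              /* wraps to 0 on overflow; no explicit masking needed */
    /* ring[idx] is therefore always a valid index into the 256 slots */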
-
-struct qa_core_conf {
- CpaCySymDpSessionCtx *encryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC];
- CpaCySymDpSessionCtx *decryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC];
- CpaInstanceHandle instanceHandle;
- struct qa_callbackQueue callbackQueue;
- uint64_t qaOutstandingRequests;
- uint64_t numResponseAttempts;
- uint8_t kickFreq;
- void *pPacketIV;
- CpaPhysicalAddr packetIVPhy;
- struct lcore_memzone lcoreMemzone;
-} __rte_cache_aligned;
-
-#define MAX_CORES (RTE_MAX_LCORE)
-
-static struct qa_core_conf qaCoreConf[MAX_CORES];
-
-/*
- * Create maximum possible key sizes,
- * one for cipher and one for hash.
- */
-struct glob_keys {
- uint8_t cipher_key[32];
- uint8_t hash_key[64];
- uint8_t iv[16];
-};
-
-struct glob_keys g_crypto_hash_keys = {
- .cipher_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
- 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10,
- 0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,
- 0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20},
- .hash_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
- 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10,
- 0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,
- 0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20,
- 0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,
- 0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f,0x30,
- 0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,
- 0x39,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f,0x50},
- .iv = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
- 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10}
-};
-
-/*
- * Offsets from the start of the packet.
- */
-#define PACKET_DATA_START_PHYS(p) \
- ((p)->buf_physaddr + (p)->data_off)
-
-/*
- * A fixed offset to where the crypto is to be performed, which is the first
- * byte after the Ethernet (14-byte) and IPv4 (20-byte) headers.
- */
-#define CRYPTO_START_OFFSET (14+20)
-#define HASH_START_OFFSET (14+20)
-#define CIPHER_BLOCK_DEFAULT_SIZE (16)
-#define HASH_BLOCK_DEFAULT_SIZE (16)
-
-/*
- * Offset to the opdata from the start of the data portion of the packet.
- * Assumption: the buffer is physically contiguous.
- * +18 takes this to the next cache line.
- */
-
-#define CRYPTO_OFFSET_TO_OPDATA (ETHER_MAX_LEN+18)
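Worked out with DPDK's ETHER_MAX_LEN of 1518 bytes: the op data lands at offset 1518 + 18 = 1536 = 24 × 64, i.e. on a cache-line boundary (assuming 64-byte lines) just past the largest standard frame, so it never shares a cache line with packet data.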
-
-/*
- * Default number of requests to place on the hardware ring before kicking the
- * ring pointers.
- */
-#define CRYPTO_BURST_TX (16)
-
-/*
- * Only call the qa poll function when the number of responses in the software
- * queue drops below this number.
- */
-#define CRYPTO_QUEUED_RESP_POLL_THRESHOLD (32)
-
-/*
- * Limit the number of polls per call to get_next_response.
- */
-#define GET_NEXT_RESPONSE_FREQ (32)
-
-/*
- * Max number of responses to pull from the qa in one poll.
- */
-#define CRYPTO_MAX_RESPONSE_QUOTA \
- (CRYPTO_SOFTWARE_QUEUE_SIZE-CRYPTO_QUEUED_RESP_POLL_THRESHOLD-1)
-
-#if (CRYPTO_QUEUED_RESP_POLL_THRESHOLD + CRYPTO_MAX_RESPONSE_QUOTA >= \
- CRYPTO_SOFTWARE_QUEUE_SIZE)
-#error It is possible to overflow the qa response queue with the current \
-	poll and response quota.
-#endif
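With the defaults this resolves to CRYPTO_MAX_RESPONSE_QUOTA = 256 - 32 - 1 = 223, and the guard checks 32 + 223 = 255 < 256, so the build passes: one poll can never return more responses than the software queue has free slots. Under C11 the same guard could be written as a static assertion (a hypothetical alternative to the #error block above):

    _Static_assert(CRYPTO_QUEUED_RESP_POLL_THRESHOLD + CRYPTO_MAX_RESPONSE_QUOTA <
            CRYPTO_SOFTWARE_QUEUE_SIZE,
            "poll quota can overflow the qa response queue");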
-
-static void
-crypto_callback(CpaCySymDpOpData *pOpData,
- __rte_unused CpaStatus status,
- __rte_unused CpaBoolean verifyResult)
-{
- uint32_t lcore_id;
- lcore_id = rte_lcore_id();
- struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue);
-
- /*
- * Received a completion from the QA hardware.
- * Place the response on the return queue.
- */
- callbackQ->qaCallbackRing[callbackQ->head] = pOpData->pCallbackTag;
- callbackQ->head++;
- callbackQ->numEntries++;
- qaCoreConf[lcore_id].qaOutstandingRequests--;
-}
-
-static void
-qa_crypto_callback(CpaCySymDpOpData *pOpData, CpaStatus status,
- CpaBoolean verifyResult)
-{
- crypto_callback(pOpData, status, verifyResult);
-}
-
-/*
- * Each allocation from a particular memzone lasts for the life-time of
- * the application. No freeing of previous allocations will occur.
- */
-static void *
-alloc_memzone_region(uint32_t length, uint32_t lcore_id)
-{
- char *current_free_addr_ptr = NULL;
- struct lcore_memzone *lcore_memzone = &(qaCoreConf[lcore_id].lcoreMemzone);
-
- current_free_addr_ptr = lcore_memzone->next_free_address;
-
- if (current_free_addr_ptr + length >=
- (char *)lcore_memzone->memzone->addr + lcore_memzone->memzone->len) {
- printf("Crypto: No memory available in memzone\n");
- return NULL;
- }
- lcore_memzone->next_free_address = current_free_addr_ptr + length;
-
- return (void *)current_free_addr_ptr;
-}
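This is a bump allocator over the per-lcore memzone: each call simply advances next_free_address, and nothing is ever freed. A hedged usage sketch (the 64-byte size and the error handling are illustrative only):

    /* Carve 64 bytes of DMA-able scratch space out of lcore 2's memzone. */
    void *scratch = alloc_memzone_region(64, 2);
    if (scratch == NULL)
            rte_exit(EXIT_FAILURE, "Crypto: lcore memzone exhausted\n");
    /* The region stays valid for the lifetime of the application. */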
-
-/*
- * Virtual to Physical Address translation is only executed during initialization
- * and not on the data-path.
- */
-static CpaPhysicalAddr
-qa_v2p(void *ptr)
-{
- const struct rte_memzone *memzone = NULL;
- uint32_t lcore_id = 0;
- RTE_LCORE_FOREACH(lcore_id) {
- memzone = qaCoreConf[lcore_id].lcoreMemzone.memzone;
-
- if ((char*) ptr >= (char *) memzone->addr &&
- (char*) ptr < ((char*) memzone->addr + memzone->len)) {
- return (CpaPhysicalAddr)
- (memzone->phys_addr + ((char *) ptr - (char*) memzone->addr));
- }
- }
- printf("Crypto: Corresponding physical address not found in memzone\n");
- return (CpaPhysicalAddr) 0;
-}
-
-static CpaStatus
-getCoreAffinity(Cpa32U *coreAffinity, const CpaInstanceHandle instanceHandle)
-{
- CpaInstanceInfo2 info;
- Cpa16U i = 0;
- CpaStatus status = CPA_STATUS_SUCCESS;
-
- memset(&info, 0, sizeof(CpaInstanceInfo2));
-
- status = cpaCyInstanceGetInfo2(instanceHandle, &info);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: Error getting instance info\n");
- return CPA_STATUS_FAIL;
- }
- for (i = 0; i < MAX_CORES; i++) {
- if (CPA_BITMAP_BIT_TEST(info.coreAffinity, i)) {
- *coreAffinity = i;
- return CPA_STATUS_SUCCESS;
- }
- }
- return CPA_STATUS_FAIL;
-}
-
-static CpaStatus
-get_crypto_instance_on_core(CpaInstanceHandle *pInstanceHandle,
- uint32_t lcore_id)
-{
- Cpa16U numInstances = 0, i = 0;
- CpaStatus status = CPA_STATUS_FAIL;
- CpaInstanceHandle *pLocalInstanceHandles = NULL;
- Cpa32U coreAffinity = 0;
-
- status = cpaCyGetNumInstances(&numInstances);
- if (CPA_STATUS_SUCCESS != status || numInstances == 0) {
- return CPA_STATUS_FAIL;
- }
-
- pLocalInstanceHandles = rte_malloc("pLocalInstanceHandles",
- sizeof(CpaInstanceHandle) * numInstances, RTE_CACHE_LINE_SIZE);
-
- if (NULL == pLocalInstanceHandles) {
- return CPA_STATUS_FAIL;
- }
- status = cpaCyGetInstances(numInstances, pLocalInstanceHandles);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCyGetInstances failed with status: %"PRId32"\n", status);
- rte_free((void *) pLocalInstanceHandles);
- return CPA_STATUS_FAIL;
- }
-
- for (i = 0; i < numInstances; i++) {
- status = getCoreAffinity(&coreAffinity, pLocalInstanceHandles[i]);
- if (CPA_STATUS_SUCCESS != status) {
- rte_free((void *) pLocalInstanceHandles);
- return CPA_STATUS_FAIL;
- }
- if (coreAffinity == lcore_id) {
-			printf("Crypto: instance %d found on lcore %u\n", i, lcore_id);
- *pInstanceHandle = pLocalInstanceHandles[i];
- return CPA_STATUS_SUCCESS;
- }
- }
- /* core affinity not found */
- rte_free((void *) pLocalInstanceHandles);
- return CPA_STATUS_FAIL;
-}
-
-static CpaStatus
-initCySymSession(const int pkt_cipher_alg,
- const int pkt_hash_alg, const CpaCySymHashMode hashMode,
- const CpaCySymCipherDirection crypto_direction,
- CpaCySymSessionCtx **ppSessionCtx,
- const CpaInstanceHandle cyInstanceHandle,
- const uint32_t lcore_id)
-{
- Cpa32U sessionCtxSizeInBytes = 0;
- CpaStatus status = CPA_STATUS_FAIL;
- CpaBoolean isCrypto = CPA_TRUE, isHmac = CPA_TRUE;
- CpaCySymSessionSetupData sessionSetupData;
-
- memset(&sessionSetupData, 0, sizeof(CpaCySymSessionSetupData));
-
- /* Assumption: key length is set to each algorithm's max length */
- switch (pkt_cipher_alg) {
- case NO_CIPHER:
- isCrypto = CPA_FALSE;
- break;
- case CIPHER_DES:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_DES_ECB;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_64_IN_BYTES;
- break;
- case CIPHER_DES_CBC:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_DES_CBC;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_64_IN_BYTES;
- break;
- case CIPHER_DES3:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_3DES_ECB;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_192_IN_BYTES;
- break;
- case CIPHER_DES3_CBC:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_3DES_CBC;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_192_IN_BYTES;
- break;
- case CIPHER_AES:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_AES_ECB;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_128_IN_BYTES;
- break;
- case CIPHER_AES_CBC_128:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_AES_CBC;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_128_IN_BYTES;
- break;
- case CIPHER_KASUMI_F8:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_KASUMI_F8;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_128_IN_BYTES;
- break;
- default:
- printf("Crypto: Undefined Cipher specified\n");
- break;
- }
- /* Set the cipher direction */
- if (isCrypto) {
- sessionSetupData.cipherSetupData.cipherDirection = crypto_direction;
- sessionSetupData.cipherSetupData.pCipherKey =
- g_crypto_hash_keys.cipher_key;
- sessionSetupData.symOperation = CPA_CY_SYM_OP_CIPHER;
- }
-
- /* Setup Hash common fields */
- switch (pkt_hash_alg) {
- case NO_HASH:
- isHmac = CPA_FALSE;
- break;
- case HASH_AES_XCBC:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- AES_XCBC_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_AES_XCBC_96:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- AES_XCBC_96_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_MD5:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- MD5_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA1:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA1_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA1_96:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA1_96_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA224:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA224;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA224_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA256:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA256;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA256_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA384:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA384;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA384_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA512:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA512;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA512_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_KASUMI_F9:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_KASUMI_F9;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- KASUMI_DIGEST_LENGTH_IN_BYTES;
- break;
- default:
- printf("Crypto: Undefined Hash specified\n");
- break;
- }
- if (isHmac) {
- sessionSetupData.hashSetupData.hashMode = hashMode;
- sessionSetupData.symOperation = CPA_CY_SYM_OP_HASH;
-		/* If using an authenticated hash, set up the key lengths */
- if (CPA_CY_SYM_HASH_MODE_AUTH == hashMode) {
- /* Use a common max length key */
- sessionSetupData.hashSetupData.authModeSetupData.authKey =
- g_crypto_hash_keys.hash_key;
- switch (pkt_hash_alg) {
- case HASH_AES_XCBC:
- case HASH_AES_XCBC_96:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_MD5:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
-					MD5_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA1:
- case HASH_SHA1_96:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA1_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA224:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA224_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA256:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA256_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA384:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA384_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA512:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA512_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_KASUMI_F9:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- KASUMI_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- default:
- printf("Crypto: Undefined Hash specified\n");
- return CPA_STATUS_FAIL;
- }
- }
- }
-
- /* Only high priority supported */
- sessionSetupData.sessionPriority = CPA_CY_PRIORITY_HIGH;
-
- /* If chaining algorithms */
- if (isCrypto && isHmac) {
- sessionSetupData.symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING;
-		/* @assumption Alg chain order is cipher then hash for encrypt
-		 * and hash then cipher for decrypt */
- if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == crypto_direction) {
- sessionSetupData.algChainOrder =
- CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
- } else {
- sessionSetupData.algChainOrder =
- CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
- }
- }
- if (!isCrypto && !isHmac) {
- *ppSessionCtx = NULL;
- return CPA_STATUS_SUCCESS;
- }
-
- /* Set flags for digest operations */
- sessionSetupData.digestIsAppended = CPA_FALSE;
- sessionSetupData.verifyDigest = CPA_TRUE;
-
-	/* Get the session context size based on the crypto and/or hash operations */
- status = cpaCySymDpSessionCtxGetSize(cyInstanceHandle, &sessionSetupData,
- &sessionCtxSizeInBytes);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySymDpSessionCtxGetSize error, status: %"PRId32"\n",
- status);
- return CPA_STATUS_FAIL;
- }
-
- *ppSessionCtx = alloc_memzone_region(sessionCtxSizeInBytes, lcore_id);
- if (NULL == *ppSessionCtx) {
- printf("Crypto: Failed to allocate memory for Session Context\n");
- return CPA_STATUS_FAIL;
- }
-
- status = cpaCySymDpInitSession(cyInstanceHandle, &sessionSetupData,
- *ppSessionCtx);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySymDpInitSession failed with status %"PRId32"\n", status);
- return CPA_STATUS_FAIL;
- }
- return CPA_STATUS_SUCCESS;
-}
-
-static CpaStatus
-initSessionDataTables(struct qa_core_conf *qaCoreConf,uint32_t lcore_id)
-{
- Cpa32U i = 0, j = 0;
- CpaStatus status = CPA_STATUS_FAIL;
- for (i = 0; i < NUM_CRYPTO; i++) {
- for (j = 0; j < NUM_HMAC; j++) {
- if (((i == CIPHER_KASUMI_F8) && (j != NO_HASH) && (j != HASH_KASUMI_F9)) ||
- ((i != NO_CIPHER) && (i != CIPHER_KASUMI_F8) && (j == HASH_KASUMI_F9)))
- continue;
- status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH,
- CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT,
- &qaCoreConf->encryptSessionHandleTbl[i][j],
- qaCoreConf->instanceHandle,
- lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: Failed to initialize Encrypt sessions\n");
- return CPA_STATUS_FAIL;
- }
- status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH,
- CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT,
- &qaCoreConf->decryptSessionHandleTbl[i][j],
- qaCoreConf->instanceHandle,
- lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: Failed to initialize Decrypt sessions\n");
- return CPA_STATUS_FAIL;
- }
- }
- }
- return CPA_STATUS_SUCCESS;
-}
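Pre-building a session for every valid (cipher, hash) pair moves all session setup off the data path; per packet, the transmit path only has to index the table, as crypto_encrypt() below does:

    /* Data-path lookup: no per-packet session creation cost. */
    opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];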
-
-int
-crypto_init(void)
-{
- if (CPA_STATUS_SUCCESS != icp_sal_userStartMultiProcess("SSL",CPA_FALSE)) {
- printf("Crypto: Could not start sal for user space\n");
- return CPA_STATUS_FAIL;
- }
- printf("Crypto: icp_sal_userStartMultiProcess(\"SSL\",CPA_FALSE)\n");
- return 0;
-}
-
-/*
- * Per core initialisation
- */
-int
-per_core_crypto_init(uint32_t lcore_id)
-{
- CpaStatus status = CPA_STATUS_FAIL;
- char memzone_name[RTE_MEMZONE_NAMESIZE];
-
- int socketID = rte_lcore_to_socket_id(lcore_id);
-
-	/* Initialise the software queue for response messages. */
-
- qaCoreConf[lcore_id].callbackQueue.head = 0;
- qaCoreConf[lcore_id].callbackQueue.tail = 0;
- qaCoreConf[lcore_id].callbackQueue.numEntries = 0;
- qaCoreConf[lcore_id].kickFreq = 0;
- qaCoreConf[lcore_id].qaOutstandingRequests = 0;
- qaCoreConf[lcore_id].numResponseAttempts = 0;
-
- /* Initialise and reserve lcore memzone for virt2phys translation */
- snprintf(memzone_name,
- RTE_MEMZONE_NAMESIZE,
- "lcore_%u",
- lcore_id);
-
- qaCoreConf[lcore_id].lcoreMemzone.memzone = rte_memzone_reserve(
- memzone_name,
- LCORE_MEMZONE_SIZE,
- socketID,
- 0);
- if (NULL == qaCoreConf[lcore_id].lcoreMemzone.memzone) {
- printf("Crypto: Error allocating memzone on lcore %u\n",lcore_id);
- return -1;
- }
- qaCoreConf[lcore_id].lcoreMemzone.next_free_address =
- qaCoreConf[lcore_id].lcoreMemzone.memzone->addr;
-
- qaCoreConf[lcore_id].pPacketIV = alloc_memzone_region(IV_LENGTH_16_BYTES,
- lcore_id);
-
- if (NULL == qaCoreConf[lcore_id].pPacketIV ) {
- printf("Crypto: Failed to allocate memory for Initialization Vector\n");
- return -1;
- }
-
- memcpy(qaCoreConf[lcore_id].pPacketIV, &g_crypto_hash_keys.iv,
- IV_LENGTH_16_BYTES);
-
- qaCoreConf[lcore_id].packetIVPhy = qa_v2p(qaCoreConf[lcore_id].pPacketIV);
- if (0 == qaCoreConf[lcore_id].packetIVPhy) {
- printf("Crypto: Invalid physical address for Initialization Vector\n");
- return -1;
- }
-
- /*
- * Obtain the instance handle that is mapped to the current lcore.
- * This can fail if an instance is not mapped to a bank which has been
- * affinitized to the current lcore.
- */
- status = get_crypto_instance_on_core(&(qaCoreConf[lcore_id].instanceHandle),
- lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: get_crypto_instance_on_core failed with status: %"PRId32"\n",
- status);
- return -1;
- }
-
- status = cpaCySymDpRegCbFunc(qaCoreConf[lcore_id].instanceHandle,
- (CpaCySymDpCbFunc) qa_crypto_callback);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySymDpRegCbFunc failed with status: %"PRId32"\n", status);
- return -1;
- }
-
-	/*
-	 * Set the address translation callback for virtual to physical address
-	 * mapping. This will be called by the QAT driver during initialisation only.
-	 */
- status = cpaCySetAddressTranslation(qaCoreConf[lcore_id].instanceHandle,
- (CpaVirtualToPhysical) qa_v2p);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySetAddressTranslation failed with status: %"PRId32"\n",
- status);
- return -1;
- }
-
- status = initSessionDataTables(&qaCoreConf[lcore_id],lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
-		printf("Crypto: Failed to allocate all session tables.\n");
- return -1;
- }
- return 0;
-}
-
-static CpaStatus
-enqueueOp(CpaCySymDpOpData *opData, uint32_t lcore_id)
-{
-
- CpaStatus status;
-
-	/*
-	 * Assumption: there is no requirement to load balance between
-	 * acceleration units - that is, one acceleration unit is tied to a core.
-	 */
- opData->instanceHandle = qaCoreConf[lcore_id].instanceHandle;
-
- if ((++qaCoreConf[lcore_id].kickFreq) % CRYPTO_BURST_TX == 0) {
- status = cpaCySymDpEnqueueOp(opData, CPA_TRUE);
- } else {
- status = cpaCySymDpEnqueueOp(opData, CPA_FALSE);
- }
-
- qaCoreConf[lcore_id].qaOutstandingRequests++;
-
- return status;
-}
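The second argument to cpaCySymDpEnqueueOp() is the hardware doorbell: most requests are queued with the kick deferred (CPA_FALSE), and only every CRYPTO_BURST_TX-th (16th) submission rings it (CPA_TRUE), amortising the MMIO write across a burst. The branch above could equally be folded into one call (a sketch with identical behaviour):

    CpaBoolean kick = (++qaCoreConf[lcore_id].kickFreq % CRYPTO_BURST_TX == 0) ?
            CPA_TRUE : CPA_FALSE;
    status = cpaCySymDpEnqueueOp(opData, kick);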
-
-void
-crypto_flush_tx_queue(uint32_t lcore_id)
-{
-
- cpaCySymDpPerformOpNow(qaCoreConf[lcore_id].instanceHandle);
-}
-
-enum crypto_result
-crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
-{
- CpaCySymDpOpData *opData =
- rte_pktmbuf_mtod_offset(rte_buff, CpaCySymDpOpData *,
- CRYPTO_OFFSET_TO_OPDATA);
- uint32_t lcore_id;
-
- if (unlikely(c >= NUM_CRYPTO || h >= NUM_HMAC))
- return CRYPTO_RESULT_FAIL;
-
- lcore_id = rte_lcore_id();
-
- memset(opData, 0, sizeof(CpaCySymDpOpData));
-
- opData->srcBuffer = opData->dstBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->srcBufferLen = opData->dstBufferLen = rte_buff->data_len;
- opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];
- opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
- + CRYPTO_OFFSET_TO_OPDATA;
- opData->pCallbackTag = rte_buff;
-
- /* if no crypto or hash operations are specified return fail */
- if (NO_CIPHER == c && NO_HASH == h)
- return CRYPTO_RESULT_FAIL;
-
- if (NO_CIPHER != c) {
- opData->pIv = qaCoreConf[lcore_id].pPacketIV;
- opData->iv = qaCoreConf[lcore_id].packetIVPhy;
-
- if (CIPHER_AES_CBC_128 == c)
- opData->ivLenInBytes = IV_LENGTH_16_BYTES;
- else
- opData->ivLenInBytes = IV_LENGTH_8_BYTES;
-
- opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->data_len
- - CRYPTO_START_OFFSET;
-		/*
-		 * Workaround for padding: the message length has to be a
-		 * multiple of the block size.
-		 */
- opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes
- % CIPHER_BLOCK_DEFAULT_SIZE;
- }
-
- if (NO_HASH != h) {
-
- opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->data_len
- - HASH_START_OFFSET;
-		/*
-		 * Workaround for padding: the message length has to be a
-		 * multiple of the block size.
-		 */
- opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
- % HASH_BLOCK_DEFAULT_SIZE;
-
-		/*
-		 * Assumption: it is OK to ignore the passed digest pointer and
-		 * place the HMAC at the end of the packet.
-		 */
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
- }
-
- if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
- /*
- * Failed to place a packet on the hardware queue.
- * Most likely because the QA hardware is busy.
- */
- return CRYPTO_RESULT_FAIL;
- }
- return CRYPTO_RESULT_IN_PROGRESS;
-}
-
-enum crypto_result
-crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
-{
-
- CpaCySymDpOpData *opData = rte_pktmbuf_mtod_offset(rte_buff, void *,
- CRYPTO_OFFSET_TO_OPDATA);
- uint32_t lcore_id;
-
- if (unlikely(c >= NUM_CRYPTO || h >= NUM_HMAC))
- return CRYPTO_RESULT_FAIL;
-
- lcore_id = rte_lcore_id();
-
- memset(opData, 0, sizeof(CpaCySymDpOpData));
-
- opData->dstBuffer = opData->srcBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->dstBufferLen = opData->srcBufferLen = rte_buff->data_len;
- opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
- + CRYPTO_OFFSET_TO_OPDATA;
- opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h];
- opData->pCallbackTag = rte_buff;
-
- /* if no crypto or hmac operations are specified return fail */
- if (NO_CIPHER == c && NO_HASH == h)
- return CRYPTO_RESULT_FAIL;
-
- if (NO_CIPHER != c) {
- opData->pIv = qaCoreConf[lcore_id].pPacketIV;
- opData->iv = qaCoreConf[lcore_id].packetIVPhy;
-
- if (CIPHER_AES_CBC_128 == c)
- opData->ivLenInBytes = IV_LENGTH_16_BYTES;
- else
- opData->ivLenInBytes = IV_LENGTH_8_BYTES;
-
- opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->data_len
- - CRYPTO_START_OFFSET;
-
-		/*
-		 * Workaround for padding: the message length has to be a
-		 * multiple of the block size.
-		 */
- opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes
- % CIPHER_BLOCK_DEFAULT_SIZE;
- }
- if (NO_HASH != h) {
- opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->data_len
- - HASH_START_OFFSET;
-		/*
-		 * Workaround for padding: the message length has to be a
-		 * multiple of the block size.
-		 */
- opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
- % HASH_BLOCK_DEFAULT_SIZE;
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
- }
-
- if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
- /*
- * Failed to place a packet on the hardware queue.
- * Most likely because the QA hardware is busy.
- */
- return CRYPTO_RESULT_FAIL;
- }
- return CRYPTO_RESULT_IN_PROGRESS;
-}
-
-void *
-crypto_get_next_response(void)
-{
- uint32_t lcore_id;
- lcore_id = rte_lcore_id();
- struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue);
- void *entry = NULL;
-
- if (callbackQ->numEntries) {
- entry = callbackQ->qaCallbackRing[callbackQ->tail];
- callbackQ->tail++;
- callbackQ->numEntries--;
- }
-
-	/* If there are no outstanding requests there is no need to poll; return the entry */
- if (qaCoreConf[lcore_id].qaOutstandingRequests == 0)
- return entry;
-
- if (callbackQ->numEntries < CRYPTO_QUEUED_RESP_POLL_THRESHOLD
- && qaCoreConf[lcore_id].numResponseAttempts++
- % GET_NEXT_RESPONSE_FREQ == 0) {
- /*
- * Only poll the hardware when there is less than
- * CRYPTO_QUEUED_RESP_POLL_THRESHOLD elements in the software queue
- */
- icp_sal_CyPollDpInstance(qaCoreConf[lcore_id].instanceHandle,
- CRYPTO_MAX_RESPONSE_QUOTA);
- }
- return entry;
-}
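Callers drain this from their polling loop. A hedged sketch of the consumer pattern (process_packet() is a hypothetical handler; main.c below instead pulls one response per loop iteration):

    struct rte_mbuf *m;
    while ((m = crypto_get_next_response()) != NULL)
            process_packet(m);  /* hypothetical per-packet handler */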
diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c
deleted file mode 100644
index aa9b1d5c..00000000
--- a/examples/dpdk_qat/main.c
+++ /dev/null
@@ -1,821 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <string.h>
-#include <sys/queue.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <getopt.h>
-
-#include <rte_common.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_launch.h>
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_prefetch.h>
-#include <rte_lcore.h>
-#include <rte_per_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_random.h>
-#include <rte_debug.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_ip.h>
-#include <rte_string_fns.h>
-
-#include "crypto.h"
-
-#define NB_MBUF (32 * 1024)
-
-#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
-#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
-#define TSC_COUNT_LIMIT 1000
-
-#define ACTION_ENCRYPT 1
-#define ACTION_DECRYPT 2
-
-/*
- * Configurable number of RX/TX ring descriptors
- */
-#define RTE_TEST_RX_DESC_DEFAULT 128
-#define RTE_TEST_TX_DESC_DEFAULT 512
-static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
-static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
-
-/* ethernet addresses of ports */
-static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
-
-/* mask of enabled ports */
-static unsigned enabled_port_mask = 0;
-static int promiscuous_on = 1; /**< Promiscuous mode enabled on all ports by default. */
-
-/* destination port for each enabled port */
-static uint32_t dst_ports[RTE_MAX_ETHPORTS];
-
-struct mbuf_table {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
-struct lcore_rx_queue {
- uint8_t port_id;
- uint8_t queue_id;
-};
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-
-#define MAX_LCORE_PARAMS 1024
-struct lcore_params {
- uint8_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
-};
-
-static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
-static struct lcore_params lcore_params_array_default[] = {
- {0, 0, 2},
- {0, 1, 2},
- {0, 2, 2},
- {1, 0, 2},
- {1, 1, 2},
- {1, 2, 2},
- {2, 0, 2},
- {3, 0, 3},
- {3, 1, 3},
-};
-
-static struct lcore_params * lcore_params = lcore_params_array_default;
-static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
- sizeof(lcore_params_array_default[0]);
-
-static struct rte_eth_conf port_conf = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 1, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
- },
- .rx_adv_conf = {
- .rss_conf = {
- .rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
- },
- },
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
-};
-
-static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES];
-
-struct lcore_conf {
- uint64_t tsc;
- uint64_t tsc_count;
- uint32_t tx_mask;
- uint16_t n_rx_queue;
- uint16_t rx_queue_list_pos;
- struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
- uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct mbuf_table rx_mbuf;
- uint32_t rx_mbuf_pos;
- uint32_t rx_curr_queue;
- struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
-
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
-
-static inline struct rte_mbuf *
-nic_rx_get_packet(struct lcore_conf *qconf)
-{
- struct rte_mbuf *pkt;
-
- if (unlikely(qconf->n_rx_queue == 0))
- return NULL;
-
- /* Look for the next queue with packets; return if none */
- if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) {
- uint32_t i;
-
- qconf->rx_mbuf_pos = 0;
- for (i = 0; i < qconf->n_rx_queue; i++) {
- qconf->rx_mbuf.len = rte_eth_rx_burst(
- qconf->rx_queue_list[qconf->rx_curr_queue].port_id,
- qconf->rx_queue_list[qconf->rx_curr_queue].queue_id,
- qconf->rx_mbuf.m_table, MAX_PKT_BURST);
-
- qconf->rx_curr_queue++;
- if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue))
- qconf->rx_curr_queue = 0;
- if (likely(qconf->rx_mbuf.len > 0))
- break;
- }
- if (unlikely(i == qconf->n_rx_queue))
- return NULL;
- }
-
- /* Get the next packet from the current queue; if last packet, go to next queue */
- pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos];
- qconf->rx_mbuf_pos++;
-
- return pkt;
-}
-
-static inline void
-nic_tx_flush_queues(struct lcore_conf *qconf)
-{
- uint8_t portid;
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- struct rte_mbuf **m_table = NULL;
- uint16_t queueid, len;
- uint32_t n, i;
-
- if (likely((qconf->tx_mask & (1 << portid)) == 0))
- continue;
-
- len = qconf->tx_mbufs[portid].len;
- if (likely(len == 0))
- continue;
-
- queueid = qconf->tx_queue_id[portid];
- m_table = qconf->tx_mbufs[portid].m_table;
-
- n = rte_eth_tx_burst(portid, queueid, m_table, len);
- for (i = n; i < len; i++){
- rte_pktmbuf_free(m_table[i]);
- }
-
- qconf->tx_mbufs[portid].len = 0;
- }
-
- qconf->tx_mask = TX_QUEUE_FLUSH_MASK;
-}
-
-static inline void
-nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port)
-{
- struct lcore_conf *qconf;
- uint32_t lcoreid;
- uint16_t len;
-
- if (unlikely(pkt == NULL)) {
- return;
- }
-
- lcoreid = rte_lcore_id();
- qconf = &lcore_conf[lcoreid];
-
- len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = pkt;
- len++;
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- uint32_t n, i;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- n = rte_eth_tx_burst(port, queueid, qconf->tx_mbufs[port].m_table, MAX_PKT_BURST);
- for (i = n; i < MAX_PKT_BURST; i++){
- rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]);
- }
-
- qconf->tx_mask &= ~(1 << port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len;
-}
-
-/* main processing loop */
-static __attribute__((noreturn)) int
-main_loop(__attribute__((unused)) void *dummy)
-{
- uint32_t lcoreid;
- struct lcore_conf *qconf;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
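	/* Editorial worked example: with a 2 GHz TSC this evaluates to
	 * (2e9 + 1e6 - 1) / 1e6 * 100 = 200,000 cycles, i.e. roughly 100 us. */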
-
- lcoreid = rte_lcore_id();
- qconf = &lcore_conf[lcoreid];
-
- printf("Thread %u starting...\n", lcoreid);
-
- for (;;) {
- struct rte_mbuf *pkt;
- uint32_t pkt_from_nic_rx = 0;
- uint8_t port;
-
- /* Flush TX queues */
- qconf->tsc_count++;
- if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) {
- uint64_t tsc, diff_tsc;
-
- tsc = rte_rdtsc();
-
- diff_tsc = tsc - qconf->tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
- nic_tx_flush_queues(qconf);
- crypto_flush_tx_queue(lcoreid);
- qconf->tsc = tsc;
- }
-
- qconf->tsc_count = 0;
- }
-
-		/*
-		 * Check the Intel QuickAssist queues first.
-		 */
- pkt = (struct rte_mbuf *) crypto_get_next_response();
- if (pkt == NULL) {
- pkt = nic_rx_get_packet(qconf);
- pkt_from_nic_rx = 1;
- }
- if (pkt == NULL)
- continue;
- /* Send packet to either QAT encrypt, QAT decrypt or NIC TX */
- if (pkt_from_nic_rx) {
- struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(pkt,
- struct ipv4_hdr *,
- sizeof(struct ether_hdr));
- if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) {
- if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt,
- (enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
- (enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
- rte_pktmbuf_free(pkt);
- continue;
- }
-
- if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) {
- if(CRYPTO_RESULT_FAIL == crypto_decrypt(pkt,
- (enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
- (enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
- rte_pktmbuf_free(pkt);
- continue;
- }
- }
-
- port = dst_ports[pkt->port];
-
- /* Transmit the packet */
- nic_tx_send_packet(pkt, (uint8_t)port);
- }
-}
-
-static inline unsigned
-get_port_max_rx_queues(uint8_t port_id)
-{
- struct rte_eth_dev_info dev_info;
-
- rte_eth_dev_info_get(port_id, &dev_info);
- return dev_info.max_rx_queues;
-}
-
-static inline unsigned
-get_port_max_tx_queues(uint8_t port_id)
-{
- struct rte_eth_dev_info dev_info;
-
- rte_eth_dev_info_get(port_id, &dev_info);
- return dev_info.max_tx_queues;
-}
-
-static int
-check_lcore_params(void)
-{
- uint16_t i;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) {
- printf("invalid queue number: %hhu\n", lcore_params[i].queue_id);
- return -1;
- }
- if (!rte_lcore_is_enabled(lcore_params[i].lcore_id)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n",
- lcore_params[i].lcore_id);
- return -1;
- }
- }
- return 0;
-}
-
-static int
-check_port_config(const unsigned nb_ports)
-{
- unsigned portid;
- uint16_t i;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- portid = lcore_params[i].port_id;
- if ((enabled_port_mask & (1 << portid)) == 0) {
- printf("port %u is not enabled in port mask\n", portid);
- return -1;
- }
- if (portid >= nb_ports) {
- printf("port %u is not present on the board\n", portid);
- return -1;
- }
- }
- return 0;
-}
-
-static uint8_t
-get_port_n_rx_queues(const uint8_t port)
-{
- int queue = -1;
- uint16_t i;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
- queue = lcore_params[i].queue_id;
- }
- return (uint8_t)(++queue);
-}
-
-static int
-init_lcore_rx_queues(void)
-{
- uint16_t i, nb_rx_queue;
- uint8_t lcore;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- lcore = lcore_params[i].lcore_id;
- nb_rx_queue = lcore_conf[lcore].n_rx_queue;
- if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
- printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned)nb_rx_queue + 1, (unsigned)lcore);
- return -1;
- }
- lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
- lcore_params[i].port_id;
- lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
- lcore_params[i].queue_id;
- lcore_conf[lcore].n_rx_queue++;
- }
- return 0;
-}
-
-/* display usage */
-static void
-print_usage(const char *prgname)
-{
- printf ("%s [EAL options] -- -p PORTMASK [--no-promisc]"
-		" [--config '(port,queue,lcore)[,(port,queue,lcore)]']\n"
- " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
- " --no-promisc: disable promiscuous mode (default is ON)\n"
- " --config '(port,queue,lcore)': rx queues configuration\n",
- prgname);
-}
-
-static unsigned
-parse_portmask(const char *portmask)
-{
- char *end = NULL;
- unsigned pm;
-
- /* parse hexadecimal string */
- pm = strtoul(portmask, &end, 16);
- if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
- return 0;
-
- return pm;
-}
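Worked example: -p 0x5 parses to pm = 5 = 0b101, enabling ports 0 and 2; an empty or malformed string returns 0, which parse_args() rejects as an invalid portmask.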
-
-static int
-parse_config(const char *q_arg)
-{
- char s[256];
- const char *p, *p_end = q_arg;
- char *end;
- enum fieldnames {
- FLD_PORT = 0,
- FLD_QUEUE,
- FLD_LCORE,
- _NUM_FLD
- };
- unsigned long int_fld[_NUM_FLD];
- char *str_fld[_NUM_FLD];
- int i;
- unsigned size;
-
- nb_lcore_params = 0;
-
- while ((p = strchr(p_end,'(')) != NULL) {
- if (nb_lcore_params >= MAX_LCORE_PARAMS) {
- printf("exceeded max number of lcore params: %hu\n",
- nb_lcore_params);
- return -1;
- }
- ++p;
- if((p_end = strchr(p,')')) == NULL)
- return -1;
-
- size = p_end - p;
- if(size >= sizeof(s))
- return -1;
-
- snprintf(s, sizeof(s), "%.*s", size, p);
- if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
- return -1;
- for (i = 0; i < _NUM_FLD; i++) {
- errno = 0;
- int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
- return -1;
- }
- lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
- lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
- lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
- ++nb_lcore_params;
- }
- lcore_params = lcore_params_array;
- return 0;
-}
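Worked example: --config '(0,0,2),(1,0,3)' yields nb_lcore_params = 2 with entries {port 0, queue 0, lcore 2} and {port 1, queue 0, lcore 3}, replacing the default mapping table defined earlier.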
-
-/* Parse the argument given in the command line of the application */
-static int
-parse_args(int argc, char **argv)
-{
- int opt, ret;
- char **argvopt;
- int option_index;
- char *prgname = argv[0];
- static struct option lgopts[] = {
- {"config", 1, 0, 0},
- {"no-promisc", 0, 0, 0},
- {NULL, 0, 0, 0}
- };
-
- argvopt = argv;
-
- while ((opt = getopt_long(argc, argvopt, "p:",
- lgopts, &option_index)) != EOF) {
-
- switch (opt) {
- /* portmask */
- case 'p':
- enabled_port_mask = parse_portmask(optarg);
- if (enabled_port_mask == 0) {
- printf("invalid portmask\n");
- print_usage(prgname);
- return -1;
- }
- break;
-
- /* long options */
- case 0:
- if (strcmp(lgopts[option_index].name, "config") == 0) {
- ret = parse_config(optarg);
- if (ret) {
- printf("invalid config\n");
- print_usage(prgname);
- return -1;
- }
- }
- if (strcmp(lgopts[option_index].name, "no-promisc") == 0) {
- printf("Promiscuous mode disabled\n");
- promiscuous_on = 0;
- }
- break;
- default:
- print_usage(prgname);
- return -1;
- }
- }
-
- if (enabled_port_mask == 0) {
- printf("portmask not specified\n");
- print_usage(prgname);
- return -1;
- }
-
- if (optind >= 0)
- argv[optind-1] = prgname;
-
- ret = optind-1;
- optind = 0; /* reset getopt lib */
- return ret;
-}
-
-static void
-print_ethaddr(const char *name, const struct ether_addr *eth_addr)
-{
- char buf[ETHER_ADDR_FMT_SIZE];
- ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
- printf("%s%s", name, buf);
-}
-
-static int
-init_mem(void)
-{
- int socketid;
- unsigned lcoreid;
- char s[64];
-
- RTE_LCORE_FOREACH(lcoreid) {
- socketid = rte_lcore_to_socket_id(lcoreid);
- if (socketid >= RTE_MAX_NUMA_NODES) {
- printf("Socket %d of lcore %u is out of range %d\n",
- socketid, lcoreid, RTE_MAX_NUMA_NODES);
- return -1;
- }
- if (pktmbuf_pool[socketid] == NULL) {
- snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
- pktmbuf_pool[socketid] =
- rte_pktmbuf_pool_create(s, NB_MBUF, 32, 0,
- RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
- if (pktmbuf_pool[socketid] == NULL) {
- printf("Cannot init mbuf pool on socket %d\n", socketid);
- return -1;
- }
- printf("Allocated mbuf pool on socket %d\n", socketid);
- }
- }
- return 0;
-}
-
-int
-main(int argc, char **argv)
-{
- struct lcore_conf *qconf;
- struct rte_eth_link link;
- int ret;
- unsigned nb_ports;
- uint16_t queueid;
- unsigned lcoreid;
- uint32_t nb_tx_queue;
- uint8_t portid, nb_rx_queue, queue, socketid, last_port;
- unsigned nb_ports_in_mask = 0;
-
- /* init EAL */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- return -1;
- argc -= ret;
- argv += ret;
-
- /* parse application arguments (after the EAL ones) */
- ret = parse_args(argc, argv);
- if (ret < 0)
- return -1;
-
- if (check_lcore_params() < 0)
- rte_panic("check_lcore_params failed\n");
-
- ret = init_lcore_rx_queues();
- if (ret < 0)
- return -1;
-
- ret = init_mem();
- if (ret < 0)
- return -1;
-
- nb_ports = rte_eth_dev_count();
-
- if (check_port_config(nb_ports) < 0)
- rte_panic("check_port_config failed\n");
-
- /* reset dst_ports */
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
- dst_ports[portid] = 0;
- last_port = 0;
-
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- if (nb_ports_in_mask % 2) {
- dst_ports[portid] = last_port;
- dst_ports[last_port] = portid;
- }
- else
- last_port = portid;
-
- nb_ports_in_mask++;
- }
- if (nb_ports_in_mask % 2) {
- printf("Notice: odd number of ports in portmask.\n");
- dst_ports[last_port] = last_port;
- }
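Worked example: with -p 0xf the loop pairs ports 0<->1 and 2<->3; with -p 0x7 ports 0 and 1 are paired and the odd port 2 is mapped back to itself by the fix-up above.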
-
- /* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((enabled_port_mask & (1 << portid)) == 0) {
- printf("\nSkipping disabled port %d\n", portid);
- continue;
- }
-
- /* init port */
- printf("Initializing port %d ... ", portid );
- fflush(stdout);
-
- nb_rx_queue = get_port_n_rx_queues(portid);
- if (nb_rx_queue > get_port_max_rx_queues(portid))
- rte_panic("Number of rx queues %d exceeds max number of rx queues %u"
- " for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid),
- portid);
- nb_tx_queue = rte_lcore_count();
- if (nb_tx_queue > get_port_max_tx_queues(portid))
- rte_panic("Number of lcores %u exceeds max number of tx queues %u"
- " for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid),
- portid);
- printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
- nb_rx_queue, (unsigned)nb_tx_queue );
- ret = rte_eth_dev_configure(portid, nb_rx_queue,
- (uint16_t)nb_tx_queue, &port_conf);
- if (ret < 0)
- rte_panic("Cannot configure device: err=%d, port=%d\n",
- ret, portid);
-
- rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
- print_ethaddr(" Address:", &ports_eth_addr[portid]);
- printf(", ");
-
- /* init one TX queue per couple (lcore,port) */
- queueid = 0;
- RTE_LCORE_FOREACH(lcoreid) {
- socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
- printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
- fflush(stdout);
- ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socketid,
- NULL);
- if (ret < 0)
- rte_panic("rte_eth_tx_queue_setup: err=%d, "
- "port=%d\n", ret, portid);
-
- qconf = &lcore_conf[lcoreid];
- qconf->tx_queue_id[portid] = queueid;
- queueid++;
- }
- printf("\n");
- }
-
- RTE_LCORE_FOREACH(lcoreid) {
- qconf = &lcore_conf[lcoreid];
- printf("\nInitializing rx queues on lcore %u ... ", lcoreid );
- fflush(stdout);
- /* init RX queues */
- for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
- portid = qconf->rx_queue_list[queue].port_id;
- queueid = qconf->rx_queue_list[queue].queue_id;
- socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
- printf("rxq=%d,%d,%d ", portid, queueid, socketid);
- fflush(stdout);
-
- ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- socketid,
- NULL,
- pktmbuf_pool[socketid]);
- if (ret < 0)
- rte_panic("rte_eth_rx_queue_setup: err=%d,"
- "port=%d\n", ret, portid);
- }
- }
-
- printf("\n");
-
- /* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
- if ((enabled_port_mask & (1 << portid)) == 0)
- continue;
- /* Start device */
- ret = rte_eth_dev_start(portid);
- if (ret < 0)
- rte_panic("rte_eth_dev_start: err=%d, port=%d\n",
- ret, portid);
-
- printf("done: Port %d ", portid);
-
- /* get link status */
- rte_eth_link_get(portid, &link);
- if (link.link_status)
- printf(" Link Up - speed %u Mbps - %s\n",
- (unsigned) link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
-			("full-duplex") : ("half-duplex"));
- else
- printf(" Link Down\n");
- /*
- * If enabled, put device in promiscuous mode.
- * This allows IO forwarding mode to forward packets
- * to itself through 2 cross-connected ports of the
- * target machine.
- */
- if (promiscuous_on)
- rte_eth_promiscuous_enable(portid);
- }
- printf("Crypto: Initializing Crypto...\n");
- if (crypto_init() != 0)
- return -1;
-
- RTE_LCORE_FOREACH(lcoreid) {
- if (per_core_crypto_init(lcoreid) != 0) {
- printf("Crypto: Cannot init lcore crypto on lcore %u\n", (unsigned)lcoreid);
- return -1;
- }
- }
- printf("Crypto: Initialization complete\n");
- /* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcoreid) {
- if (rte_eal_wait_lcore(lcoreid) < 0)
- return -1;
- }
-
- return 0;
-}
diff --git a/examples/ethtool/Makefile b/examples/ethtool/Makefile
index 995cd25b..30b42b70 100644
--- a/examples/ethtool/Makefile
+++ b/examples/ethtool/Makefile
@@ -46,4 +46,7 @@ else
DIRS-y += lib ethtool-app
endif
+DEPDIRS-ethtool-app := lib
+DEPDIRS-lib := librte_eal librte_ether
+
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/ethtool/ethtool-app/Makefile b/examples/ethtool/ethtool-app/Makefile
index 09c66ad1..96abf53b 100644
--- a/examples/ethtool/ethtool-app/Makefile
+++ b/examples/ethtool/ethtool-app/Makefile
@@ -50,5 +50,10 @@ CFLAGS += $(WERROR_FLAGS)
LDLIBS += -L$(subst ethtool-app,lib,$(RTE_OUTPUT))/lib
LDLIBS += -lrte_ethtool
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+endif
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/ethtool/ethtool-app/ethapp.c b/examples/ethtool/ethtool-app/ethapp.c
index 6aeaa061..35269ea2 100644
--- a/examples/ethtool/ethtool-app/ethapp.c
+++ b/examples/ethtool/ethtool-app/ethapp.c
@@ -185,6 +185,8 @@ pcmd_drvinfo_callback(__rte_unused void *ptr_params,
printf("Port %i driver: %s (ver: %s)\n",
id_port, info.driver, info.version
);
+ printf("firmware-version: %s\n", info.fw_version);
+ printf("bus-info: %s\n", info.bus_info);
}
}
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 2c655d83..6d50d463 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -172,7 +172,6 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
"%s:%i: rte_eth_dev_start failed",
__FILE__, __LINE__
);
- rte_eth_promiscuous_enable(idx_port);
rte_eth_macaddr_get(idx_port, &ptr_port->mac_addr);
rte_spinlock_init(&ptr_port->lock);
}
diff --git a/examples/ethtool/lib/Makefile b/examples/ethtool/lib/Makefile
index 5b4991e2..266babad 100644
--- a/examples/ethtool/lib/Makefile
+++ b/examples/ethtool/lib/Makefile
@@ -54,8 +54,10 @@ SRCS-y := rte_ethtool.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-# internal dependencies
-DEPDIRS-y += lib/librte_eal
-DEPDIRS-y += lib/librte_ether
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+endif
include $(RTE_SDK)/mk/rte.extlib.mk
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 6f0ce848..7e465206 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,9 @@
#include <rte_version.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
#include "rte_ethtool.h"
#define PKTPOOL_SIZE 512
@@ -48,12 +51,21 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
struct rte_eth_dev_info dev_info;
struct rte_dev_reg_info reg_info;
int n;
+ int ret;
if (drvinfo == NULL)
return -EINVAL;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ ret = rte_eth_dev_fw_version_get(port_id, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+ if (ret < 0)
+ printf("firmware version get error: (%s)\n", strerror(-ret));
+ else if (ret > 0)
+		printf("Insufficient fw version buffer size, "
+			"the minimum size should be %d\n", ret);
+
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
@@ -358,9 +370,12 @@ rte_ethtool_net_set_rx_mode(uint8_t port_id)
num_vfs = dev_info.max_vfs;
	/* Set VF vf_rx_mode; status from VFs without support is discarded */
- for (vf = 0; vf < num_vfs; vf++)
- rte_eth_dev_set_vf_rxmode(port_id, vf,
+ for (vf = 0; vf < num_vfs; vf++) {
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
ETH_VMDQ_ACCEPT_UNTAG, 0);
+#endif
+ }
	/* Enable Rx VLAN filter; status from VFs without support is discarded */
rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
diff --git a/examples/exception_path/Makefile b/examples/exception_path/Makefile
index 959914a2..4b6e0717 100644
--- a/examples/exception_path/Makefile
+++ b/examples/exception_path/Makefile
@@ -42,6 +42,7 @@ ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
+clean:
else
# binary name
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index 73d50b69..89bf1cc0 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -114,7 +114,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /* IP checksum offload disabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /* CRC stripped by hardware */
+ .hw_strip_crc = 1, /* CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index e1e32c66..71c1d12f 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -168,7 +168,7 @@ struct lcore_queue_conf {
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
@@ -176,7 +176,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 1, /**< Jumbo Frame Support enabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -265,8 +265,8 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
uint8_t queueid, uint8_t port_in)
{
struct rx_queue *rxq;
- uint32_t i, len, next_hop_ipv4;
- uint8_t next_hop_ipv6, port_out, ipv6;
+ uint32_t i, len, next_hop;
+ uint8_t port_out, ipv6;
int32_t len2;
ipv6 = 0;
@@ -290,9 +290,9 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
/* Find destination port */
- if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop_ipv4) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv4) != 0) {
- port_out = next_hop_ipv4;
+ if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ port_out = next_hop;
/* Build transmission burst for new port */
len = qconf->tx_mbufs[port_out].len;
@@ -326,9 +326,10 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *);
/* Find destination port */
- if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop_ipv6) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv6) != 0) {
- port_out = next_hop_ipv6;
+ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+ &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ port_out = next_hop;
/* Build transmission burst for new port */
len = qconf->tx_mbufs[port_out].len;
@@ -586,7 +587,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -653,6 +654,74 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
}
+/* Check L3 packet type detection capability of the NIC port */
+static int
+check_ptype(int portid)
+{
+ int i, ret;
+ int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
+ uint32_t ptype_mask = RTE_PTYPE_L3_MASK;
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
+ if (ret <= 0)
+ return 0;
+
+ uint32_t ptypes[ret];
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
+ for (i = 0; i < ret; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ ptype_l3_ipv4 = 1;
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ ptype_l3_ipv6 = 1;
+ }
+
+ if (ptype_l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+ if (ptype_l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+ if (ptype_l3_ipv4 && ptype_l3_ipv6)
+ return 1;
+
+	return 0;
+}
+
+/* Determine the L3 packet type of a packet in software */
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ uint16_t ether_type;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_type = eth_hdr->ether_type;
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ m->packet_type = packet_type;
+}
+
+/* callback function to detect packet type for a queue of a port */
+static uint16_t
+cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; ++i)
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
static int
init_routing_table(void)
{
@@ -846,6 +915,11 @@ main(int argc, char **argv)
qconf = &lcore_queue_conf[rx_lcore_id];
+ /* limit the frame size to the maximum supported by NIC */
+ rte_eth_dev_info_get(portid, &dev_info);
+ port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
+ dev_info.max_rx_pktlen, port_conf.rxmode.max_rx_pkt_len);
+
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
@@ -911,7 +985,6 @@ main(int argc, char **argv)
printf("txq=%u,%d ", lcore_id, queueid);
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
@@ -944,6 +1017,12 @@ main(int argc, char **argv)
ret, portid);
rte_eth_promiscuous_enable(portid);
+
+ if (check_ptype(portid) == 0) {
+ rte_eth_add_rx_callback(portid, 0, cb_parse_ptype, NULL);
+			printf("Add Rx callback function to detect L3 packet type by SW:"
+				" port = %d\n", portid);
+ }
}
if (init_routing_table() < 0)
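
The ip_fragmentation hunks above probe the NIC's packet type reporting and
fall back to classifying L3 headers in software. A minimal standalone sketch
of the same pattern, assuming the DPDK 17.05 ethdev API and a port that has
already been configured elsewhere (error handling intentionally thin):

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Fill m->packet_type from the Ethernet header, as the hunk above does. */
static uint16_t
sw_ptype_cb(uint8_t port __rte_unused, uint16_t queue __rte_unused,
            struct rte_mbuf *pkts[], uint16_t nb_pkts,
            uint16_t max_pkts __rte_unused, void *arg __rte_unused)
{
        uint16_t i;

        for (i = 0; i < nb_pkts; i++) {
                struct ether_hdr *eth =
                        rte_pktmbuf_mtod(pkts[i], struct ether_hdr *);
                uint32_t ptype = RTE_PTYPE_UNKNOWN;

                if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
                        ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
                        ptype = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                pkts[i]->packet_type = ptype;
        }
        return nb_pkts;
}

/* Register the callback on queue 0 when the PMD reports no L3 ptypes. */
static void
enable_sw_ptype_if_needed(uint8_t portid)
{
        int n = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK,
                                                 NULL, 0);
        if (n <= 0)
                rte_eth_add_rx_callback(portid, 0, sw_ptype_cb, NULL);
}
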
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 58271173..dc7e0ddd 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -36,8 +36,6 @@ endif
# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
-DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline
-
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
diff --git a/examples/ip_pipeline/config/diagram-generator.py b/examples/ip_pipeline/config/diagram-generator.py
index 6b7170b0..17488330 100755
--- a/examples/ip_pipeline/config/diagram-generator.py
+++ b/examples/ip_pipeline/config/diagram-generator.py
@@ -36,7 +36,8 @@
# the DPDK ip_pipeline application.
#
# The input configuration file is translated to an output file in DOT syntax,
-# which is then used to create the image file using graphviz (www.graphviz.org).
+# which is then used to create the image file using graphviz
+# (www.graphviz.org).
#
from __future__ import print_function
@@ -94,6 +95,7 @@ DOT_GRAPH_END = \
# SOURCEx | SOURCEx | SOURCEx | PIPELINEy | SOURCEx
# SINKx | SINKx | PIPELINEy | SINKx | SINKx
+
#
# Parse the input configuration file to detect the graph nodes and edges
#
@@ -321,16 +323,17 @@ def process_config_file(cfgfile):
#
print('Creating image file "%s" ...' % imgfile)
if os.system('which dot > /dev/null'):
- print('Error: Unable to locate "dot" executable.' \
- 'Please install the "graphviz" package (www.graphviz.org).')
+ print('Error: Unable to locate "dot" executable.'
+ 'Please install the "graphviz" package (www.graphviz.org).')
return
os.system(dot_cmd)
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description=\
- 'Create diagram for IP pipeline configuration file.')
+ parser = argparse.ArgumentParser(description='Create diagram for IP '
+ 'pipeline configuration '
+ 'file.')
parser.add_argument(
'-f',
diff --git a/examples/ip_pipeline/config/pipeline-to-core-mapping.py b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
index c2050b82..7a4eaa20 100755
--- a/examples/ip_pipeline/config/pipeline-to-core-mapping.py
+++ b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
@@ -39,15 +39,14 @@
#
from __future__ import print_function
-import sys
-import errno
-import os
-import re
+from collections import namedtuple
+import argparse
import array
+import errno
import itertools
+import os
import re
-import argparse
-from collections import namedtuple
+import sys
# default values
enable_stage0_traceout = 1
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index 8b372e94..0b761346 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -1,4 +1,4 @@
-/*-
+/*-
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
@@ -103,7 +103,7 @@ static const struct app_link_params link_params_default = {
.hw_vlan_strip = 0, /* VLAN strip */
.hw_vlan_extend = 0, /* Extended VLAN */
.jumbo_frame = 0, /* Jumbo frame support */
- .hw_strip_crc = 0, /* CRC strip by HW */
+ .hw_strip_crc = 1, /* CRC strip by HW */
.enable_scatter = 0, /* Scattered packets RX handler */
.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
@@ -3407,7 +3407,7 @@ app_config_args(struct app_params *app, int argc, char **argv)
app_print_usage(argv[0]);
}
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
/* Check dependencies between args */
if (preproc_params_present && (preproc_present == 0))
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index d46bd365..be148fca 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -69,7 +69,8 @@ static void
app_init_core_map(struct app_params *app)
{
APP_LOG(app, HIGH, "Initializing CPU core map ...");
- app->core_map = cpu_core_map_init(4, 32, 4, 0);
+ app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
+ 4, 0);
if (app->core_map == NULL)
rte_panic("Cannot create CPU core map\n");
@@ -329,16 +330,14 @@ app_init_mempool(struct app_params *app)
struct app_mempool_params *p = &app->mempool_params[i];
APP_LOG(app, HIGH, "Initializing %s ...", p->name);
- app->mempool[i] = rte_mempool_create(
- p->name,
- p->pool_size,
- p->buffer_size,
- p->cache_size,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- p->cpu_socket_id,
- 0);
+ app->mempool[i] = rte_pktmbuf_pool_create(
+ p->name,
+ p->pool_size,
+ p->cache_size,
+ 0, /* priv_size */
+ p->buffer_size -
+ sizeof(struct rte_mbuf), /* mbuf data size */
+ p->cpu_socket_id);
if (app->mempool[i] == NULL)
rte_panic("%s init error\n", p->name);
@@ -718,7 +717,8 @@ app_link_up_internal(struct app_params *app, struct app_link_params *cp)
/* PMD link up */
status = rte_eth_dev_set_link_up(cp->pmd_id);
- if (status < 0)
+ /* Do not panic if PMD does not provide link up functionality */
+ if (status < 0 && status != -ENOTSUP)
rte_panic("%s (%" PRIu32 "): PMD set link up error %"
PRId32 "\n", cp->name, cp->pmd_id, status);
@@ -734,7 +734,8 @@ app_link_down_internal(struct app_params *app, struct app_link_params *cp)
/* PMD link down */
status = rte_eth_dev_set_link_down(cp->pmd_id);
- if (status < 0)
+ /* Do not panic if PMD does not provide link down functionality */
+ if (status < 0 && status != -ENOTSUP)
rte_panic("%s (%" PRIu32 "): PMD set link down error %"
PRId32 "\n", cp->name, cp->pmd_id, status);
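
The app_init_mempool() hunk above replaces the open-coded rte_mempool_create()
call with the rte_pktmbuf_pool_create() helper, which supplies the pktmbuf
private size and init callbacks itself. A minimal sketch of the new call; the
pool name and sizes below are illustrative, not taken from the configuration:

#include <rte_lcore.h>
#include <rte_mbuf.h>

static struct rte_mempool *
create_pktmbuf_pool(void)
{
        struct rte_mempool *mp;

        mp = rte_pktmbuf_pool_create(
                "pool0",                    /* name (hypothetical) */
                32 * 1024,                  /* number of mbufs */
                256,                        /* per-lcore cache size */
                0,                          /* application private area */
                RTE_MBUF_DEFAULT_BUF_SIZE,  /* data room per mbuf */
                rte_socket_id());           /* NUMA socket */
        if (mp == NULL)
                return NULL; /* rte_errno holds the failure reason */
        return mp;
}
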
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
index b61f3034..2980492b 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
@@ -161,7 +161,7 @@ static struct rte_acl_field_def field_format_ipv4[] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 4,
- .input_index = 4,
+ .input_index = 3,
.offset = sizeof(struct ether_hdr) +
sizeof(struct ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
@@ -221,7 +221,7 @@ static struct rte_acl_field_def field_format_vlan_ipv4[] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 4,
- .input_index = 4,
+ .input_index = 3,
.offset = sizeof(struct ether_hdr) +
SIZEOF_VLAN_HDR +
sizeof(struct ipv4_hdr) +
@@ -282,7 +282,7 @@ static struct rte_acl_field_def field_format_qinq_ipv4[] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 4,
- .input_index = 4,
+ .input_index = 3,
.offset = sizeof(struct ether_hdr) +
SIZEOF_QINQ_HEADER +
sizeof(struct ipv4_hdr) +
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.c b/examples/ip_pipeline/pipeline/pipeline_routing.c
index 3aadbf91..3deaff9c 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.c
@@ -494,6 +494,26 @@ app_pipeline_routing_add_route(struct app_params *app,
/* data */
if (data->port_id >= p->n_ports_out)
return -1;
+
+ /* Valid range of VLAN tags is 12 bits */
+ if (data->flags & PIPELINE_ROUTING_ROUTE_QINQ)
+ if ((data->l2.qinq.svlan & 0xF000) ||
+ (data->l2.qinq.cvlan & 0xF000))
+ return -1;
+
+ /* Max number of MPLS labels supported */
+ if (data->flags & PIPELINE_ROUTING_ROUTE_MPLS) {
+ uint32_t i;
+
+ if (data->l2.mpls.n_labels >
+ PIPELINE_ROUTING_MPLS_LABELS_MAX)
+ return -1;
+
+ /* Max MPLS label value is 20 bits */
+ for (i = 0; i < data->l2.mpls.n_labels; i++)
+ if (data->l2.mpls.labels[i] & 0xFFF00000)
+ return -1;
+ }
}
break;
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 50fe4228..c0f3ced6 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -200,7 +200,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 1, /**< Jumbo Frame Support enabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -346,8 +346,8 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
struct rte_ip_frag_death_row *dr;
struct rx_queue *rxq;
void *d_addr_bytes;
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6, dst_port;
+ uint32_t next_hop;
+ uint8_t dst_port;
rxq = &qconf->rx_queue_list[queue];
@@ -390,9 +390,9 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
/* Find destination port */
- if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop_ipv4) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv4) != 0) {
- dst_port = next_hop_ipv4;
+ if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ dst_port = next_hop;
}
eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
@@ -427,9 +427,10 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
}
/* Find destination port */
- if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop_ipv6) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv6) != 0) {
- dst_port = next_hop_ipv6;
+ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+ &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ dst_port = next_hop;
}
eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);
@@ -718,7 +719,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -1062,6 +1063,11 @@ main(int argc, char **argv)
qconf = &lcore_queue_conf[rx_lcore_id];
+ /* limit the frame size to the maximum supported by NIC */
+ rte_eth_dev_info_get(portid, &dev_info);
+ port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
+ dev_info.max_rx_pktlen, port_conf.rxmode.max_rx_pkt_len);
+
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
@@ -1128,7 +1134,6 @@ main(int argc, char **argv)
printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
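
ip_fragmentation and ip_reassembly now both clamp the requested Rx frame size
to what the device reports, instead of fetching dev_info only at Tx queue
setup. A minimal sketch of the clamp, assuming port_conf is the application's
rte_eth_conf and the clamp runs before rte_eth_dev_configure():

#include <rte_common.h>
#include <rte_ethdev.h>

/* Never request a larger frame than the PMD advertises; configuring the
 * port could otherwise fail on NICs with a small max_rx_pktlen. */
static void
clamp_max_rx_pkt_len(uint8_t portid, struct rte_eth_conf *port_conf)
{
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(portid, &dev_info);
        port_conf->rxmode.max_rx_pkt_len = RTE_MIN(
                dev_info.max_rx_pktlen,
                port_conf->rxmode.max_rx_pkt_len);
}
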
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index ec5a2e62..e77afa0e 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -78,7 +78,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
- RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+ RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
return -EINVAL;
}
@@ -122,6 +122,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
switch (sa->auth_algo) {
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) +
sa->iv_len + payload_len;
@@ -354,6 +355,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
switch (sa->auth_algo) {
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) +
sa->iv_len + pad_payload_len;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5a4c9b71..8cbf6ac4 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -208,7 +208,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -618,7 +618,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
- int16_t hop[MAX_PKT_BURST * 2];
+ int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
uint8_t *ip6_dst;
uint16_t i, offset;
@@ -1039,7 +1039,7 @@ parse_args(int32_t argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index f49143b9..edca5f02 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -47,6 +47,7 @@
static inline int
create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
{
+ struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
int32_t ret;
struct cdev_key key = { 0 };
@@ -65,7 +66,7 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
return -1;
}
- RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
+ RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
"%u qp %u\n", sa->spi,
ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
@@ -73,6 +74,18 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
sa->crypto_session = rte_cryptodev_sym_session_create(
ipsec_ctx->tbl[cdev_id_qp].id, sa->xforms);
+ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
+ if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
+ ret = rte_cryptodev_queue_pair_attach_sym_session(
+ ipsec_ctx->tbl[cdev_id_qp].qp,
+ sa->crypto_session);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
+ "Session cannot be attached to qp %u ",
+ ipsec_ctx->tbl[cdev_id_qp].qp);
+ return -1;
+ }
+ }
sa->cdev_id_qp = cdev_id_qp;
return 0;
@@ -89,7 +102,7 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
cqp->buf, cqp->len);
if (ret < cqp->len) {
- RTE_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
" enqueued %u crypto ops out of %u\n",
cqp->id, cqp->qp,
ret, cqp->len);
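
The create_session() hunk above attaches each new session to its queue pair
whenever the device reports a per-queue-pair session limit. A condensed sketch
of that flow, assuming the 17.05 cryptodev session API (pre-17.08, sessions
are created per device) and caller-supplied cdev_id/qp values:

#include <rte_cryptodev.h>

static int
create_and_attach_session(uint8_t cdev_id, uint16_t qp,
                          struct rte_crypto_sym_xform *xforms,
                          struct rte_cryptodev_sym_session **sess)
{
        struct rte_cryptodev_info info;

        *sess = rte_cryptodev_sym_session_create(cdev_id, xforms);
        if (*sess == NULL)
                return -1;

        /* Only PMDs with a per-qp session cap need the attach step. */
        rte_cryptodev_info_get(cdev_id, &info);
        if (info.sym.max_nb_sessions_per_qp > 0 &&
            rte_cryptodev_queue_pair_attach_sym_session(qp, *sess) < 0)
                return -1;

        return 0;
}
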
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index dbc8c2cb..fe426614 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -90,7 +90,7 @@ struct ip_addr {
} ip;
};
-#define MAX_KEY_SIZE 20
+#define MAX_KEY_SIZE 32
struct ipsec_sa {
uint32_t spi;
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 8c4406cf..39624c49 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -114,6 +114,12 @@ const struct supported_auth_algo auth_algos[] = {
.key_len = 20
},
{
+ .keyword = "sha256-hmac",
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .digest_len = 12,
+ .key_len = 32
+ },
+ {
.keyword = "aes-128-gcm",
.algo = RTE_CRYPTO_AUTH_AES_GCM,
.digest_len = 16,
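
For context on the sa.c table above: each supported_auth_algo entry binds a
configuration keyword to an algorithm plus its digest and key lengths, so
adding sha256-hmac is a one-entry change. A sketch of how such a keyword table
is typically searched; the struct here is an illustrative mirror (the real one
in sa.c may carry additional fields) and find_auth_algo() is hypothetical:

#include <string.h>
#include <rte_cryptodev.h>

struct supported_auth_algo {
        const char *keyword;
        enum rte_crypto_auth_algorithm algo;
        uint16_t digest_len;
        uint16_t key_len;
};

/* Linear scan over the keyword table; NULL means unsupported. */
static const struct supported_auth_algo *
find_auth_algo(const struct supported_auth_algo *tbl, size_t n,
               const char *keyword)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (strcmp(tbl[i].keyword, keyword) == 0)
                        return &tbl[i];
        return NULL;
}
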
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 708d76e9..96a4ab6e 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -137,7 +137,7 @@ struct lcore_queue_conf {
} __rte_cache_aligned;
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
@@ -145,7 +145,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 1, /**< Jumbo Frame Support enabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -575,7 +575,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -725,6 +725,11 @@ main(int argc, char **argv)
qconf = &lcore_queue_conf[rx_lcore_id];
+ /* limit the frame size to the maximum supported by NIC */
+ rte_eth_dev_info_get(portid, &dev_info);
+ port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
+ dev_info.max_rx_pktlen, port_conf.rxmode.max_rx_pkt_len);
+
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
@@ -777,7 +782,6 @@ main(int argc, char **argv)
printf("txq=%u,%hu ", lcore_id, queueid);
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 57313d11..0be57d83 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -130,7 +130,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /* IP checksum offload disabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /* CRC stripped by hardware */
+ .hw_strip_crc = 1, /* CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/l2fwd-cat/cat.c b/examples/l2fwd-cat/cat.c
index bad39305..6133bf5b 100644
--- a/examples/l2fwd-cat/cat.c
+++ b/examples/l2fwd-cat/cat.c
@@ -686,7 +686,7 @@ parse_args(int argc, char **argv)
exit:
/* reset getopt lib */
- optind = 0;
+ optind = 1;
/* Restore opterr value */
opterr = oldopterr;
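
Most parse_args() functions in this patch switch the getopt reset from
optind = 0 to optind = 1. Setting optind to 1 is the portable way to rescan a
fresh argv after the EAL has consumed its own options; optind = 0 is a
glibc-specific full reinitialization. A minimal sketch of the two-pass
pattern, with app_parse_args() as a hypothetical application parser:

#include <getopt.h>
#include <rte_eal.h>

int app_parse_args(int argc, char **argv); /* hypothetical parser */

int
main(int argc, char **argv)
{
        int ret = rte_eal_init(argc, argv); /* first pass: EAL options */

        if (ret < 0)
                return -1;
        argc -= ret;
        argv += ret;

        optind = 1; /* reset getopt before the second pass */
        if (app_parse_args(argc, argv) < 0)
                return -1;
        return 0;
}
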
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index bc88be5e..94921935 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -135,9 +135,6 @@ struct l2fwd_key {
phys_addr_t phys_addr;
};
-char supported_auth_algo[RTE_CRYPTO_AUTH_LIST_END][MAX_STR_LEN];
-char supported_cipher_algo[RTE_CRYPTO_CIPHER_LIST_END][MAX_STR_LEN];
-
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -170,6 +167,8 @@ struct l2fwd_crypto_options {
uint16_t block_size;
char string_type[MAX_STR_LEN];
+
+ uint64_t cryptodev_mask;
};
/** l2fwd crypto lcore params */
@@ -215,7 +214,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -331,50 +330,6 @@ print_stats(void)
printf("\n====================================================\n");
}
-static void
-fill_supported_algorithm_tables(void)
-{
- unsigned i;
-
- for (i = 0; i < RTE_CRYPTO_AUTH_LIST_END; i++)
- strcpy(supported_auth_algo[i], "NOT_SUPPORTED");
-
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GMAC], "AES_GMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5], "MD5");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
- "AES_XCBC_MAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1], "SHA1");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224], "SHA224");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256], "SHA256");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384], "SHA384");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512], "SHA512");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_ZUC_EIA3], "ZUC_EIA3");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
-
- for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
- strcpy(supported_cipher_algo[i], "NOT_SUPPORTED");
-
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CBC], "AES_CBC");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CTR], "AES_CTR");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_ZUC_EEA3], "ZUC_EEA3");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CTR], "3DES_CTR");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CBC], "3DES_CBC");
-}
-
-
static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
@@ -432,7 +387,8 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ip_hdr;
- unsigned ipdata_offset, pad_len, data_len;
+ uint32_t ipdata_offset, data_len;
+ uint32_t pad_len = 0;
char *padding;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -455,16 +411,33 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
if (cparams->do_hash && cparams->hash_verify)
data_len -= cparams->digest_length;
- pad_len = data_len % cparams->block_size ? cparams->block_size -
- (data_len % cparams->block_size) : 0;
+ if (cparams->do_cipher) {
+ /*
+ * Following algorithms are block cipher algorithms,
+ * and might need padding
+ */
+ switch (cparams->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ if (data_len % cparams->block_size)
+ pad_len = cparams->block_size -
+ (data_len % cparams->block_size);
+ break;
+ default:
+ pad_len = 0;
+ }
- if (pad_len) {
- padding = rte_pktmbuf_append(m, pad_len);
- if (unlikely(!padding))
- return -1;
+ if (pad_len) {
+ padding = rte_pktmbuf_append(m, pad_len);
+ if (unlikely(!padding))
+ return -1;
- data_len += pad_len;
- memset(padding, 0, pad_len);
+ data_len += pad_len;
+ memset(padding, 0, pad_len);
+ }
}
/* Set crypto operation data parameters */
@@ -499,6 +472,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
op->sym->auth.aad.data = cparams->aad.data;
op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
op->sym->auth.aad.length = cparams->aad.length;
+ } else {
+ op->sym->auth.aad.data = NULL;
+ op->sym->auth.aad.phys_addr = 0;
+ op->sym->auth.aad.length = 0;
}
}
@@ -699,7 +676,8 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
generate_random_key(port_cparams[i].aad.data,
port_cparams[i].aad.length);
- }
+ } else
+ port_cparams[i].aad.length = 0;
if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
port_cparams[i].hash_verify = 1;
@@ -810,7 +788,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
ops_burst, nb_rx) !=
nb_rx) {
for (j = 0; j < nb_rx; j++)
- rte_pktmbuf_free(pkts_burst[i]);
+ rte_pktmbuf_free(pkts_burst[j]);
nb_rx = 0;
}
@@ -881,7 +859,8 @@ l2fwd_crypto_usage(const char *prgname)
" --aad_random_size SIZE: size of AAD when generated randomly\n"
" --digest_size SIZE: size of digest to be generated/verified\n"
- " --sessionless\n",
+ " --sessionless\n"
+ " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n",
prgname);
}
@@ -928,17 +907,14 @@ parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
static int
parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
{
- unsigned i;
- for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++) {
- if (!strcmp(supported_cipher_algo[i], optarg)) {
- *algo = (enum rte_crypto_cipher_algorithm)i;
- return 0;
- }
+ if (rte_cryptodev_get_cipher_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "Cipher algorithm specified "
+ "not supported!\n");
+ return -1;
}
- printf("Cipher algorithm not supported!\n");
- return -1;
+ return 0;
}
/** Parse crypto cipher operation command line argument */
@@ -1004,17 +980,13 @@ parse_size(int *size, const char *q_arg)
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- unsigned i;
-
- for (i = 0; i < RTE_CRYPTO_AUTH_LIST_END; i++) {
- if (!strcmp(supported_auth_algo[i], optarg)) {
- *algo = (enum rte_crypto_auth_algorithm)i;
- return 0;
- }
+ if (rte_cryptodev_get_auth_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "Authentication algorithm specified "
+ "not supported!\n");
+ return -1;
}
- printf("Authentication algorithm specified not supported!\n");
- return -1;
+ return 0;
}
static int
@@ -1032,6 +1004,27 @@ parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
return -1;
}
+static int
+parse_cryptodev_mask(struct l2fwd_crypto_options *options,
+ const char *q_arg)
+{
+ char *end = NULL;
+ uint64_t pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(q_arg, &end, 16);
+ if ((pm == '\0') || (end == NULL) || (*end != '\0'))
+ pm = 0;
+
+ options->cryptodev_mask = pm;
+ if (options->cryptodev_mask == 0) {
+ printf("invalid cryptodev_mask specified\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/** Parse long options */
static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
@@ -1132,6 +1125,9 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return 0;
}
+ else if (strcmp(lgopts[option_index].name, "cryptodev_mask") == 0)
+ return parse_cryptodev_mask(options, optarg);
+
return -1;
}
@@ -1246,6 +1242,7 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
options->type = CDEV_TYPE_ANY;
+ options->cryptodev_mask = UINT64_MAX;
}
static void
@@ -1253,7 +1250,7 @@ display_cipher_info(struct l2fwd_crypto_options *options)
{
printf("\n---- Cipher information ---\n");
printf("Algorithm: %s\n",
- supported_cipher_algo[options->cipher_xform.cipher.algo]);
+ rte_crypto_cipher_algorithm_strings[options->cipher_xform.cipher.algo]);
rte_hexdump(stdout, "Cipher key:",
options->cipher_xform.cipher.key.data,
options->cipher_xform.cipher.key.length);
@@ -1265,7 +1262,7 @@ display_auth_info(struct l2fwd_crypto_options *options)
{
printf("\n---- Authentication information ---\n");
printf("Algorithm: %s\n",
- supported_auth_algo[options->auth_xform.auth.algo]);
+ rte_crypto_auth_algorithm_strings[options->auth_xform.auth.algo]);
rte_hexdump(stdout, "Auth key:",
options->auth_xform.auth.key.data,
options->auth_xform.auth.key.length);
@@ -1368,6 +1365,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "digest_size", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+ { "cryptodev_mask", required_argument, 0, 0},
{ NULL, 0, 0, 0 }
};
@@ -1432,7 +1430,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
argv[optind-1] = prgname;
retval = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return retval;
}
@@ -1508,6 +1506,17 @@ check_type(struct l2fwd_crypto_options *options, struct rte_cryptodev_info *dev_
return -1;
}
+/* Check if the device is enabled by cryptodev_mask */
+static int
+check_cryptodev_mask(struct l2fwd_crypto_options *options,
+ uint8_t cdev_id)
+{
+ if (options->cryptodev_mask & (1ULL << cdev_id))
+ return 0;
+
+ return -1;
+}
+
static inline int
check_supported_size(uint16_t length, uint16_t min, uint16_t max,
uint16_t increment)
@@ -1562,6 +1571,9 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
}
};
+ if (check_cryptodev_mask(options, (uint8_t)cdev_id))
+ continue;
+
rte_cryptodev_info_get(cdev_id, &dev_info);
/* Set cipher parameters */
@@ -1587,7 +1599,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
printf("Algorithm %s not supported by cryptodev %u"
" or device not of preferred type (%s)\n",
- supported_cipher_algo[opt_cipher_algo],
+ rte_crypto_cipher_algorithm_strings[opt_cipher_algo],
cdev_id,
options->string_type);
continue;
@@ -1687,13 +1699,12 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
printf("Algorithm %s not supported by cryptodev %u"
" or device not of preferred type (%s)\n",
- supported_auth_algo[opt_auth_algo],
+ rte_crypto_auth_algorithm_strings[opt_auth_algo],
cdev_id,
options->string_type);
continue;
}
- options->block_size = cap->sym.auth.block_size;
/*
* Check if length of provided AAD is supported
* by the algorithm chosen.
@@ -1967,9 +1978,6 @@ main(int argc, char **argv)
/* reserve memory for Cipher/Auth key and IV */
reserve_key_memory(&options);
- /* fill out the supported algorithm tables */
- fill_supported_algorithm_tables();
-
/* parse application arguments (after the EAL ones) */
ret = l2fwd_crypto_parse_args(&options, argc, argv);
if (ret < 0)
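
The new --cryptodev_mask option above selects crypto devices by a hexadecimal
bitmask. A minimal sketch of the parse/check pair, assuming a 64-bit mask;
note the 1ULL shift, which stays defined for device ids above 31:

#include <stdint.h>
#include <stdlib.h>

/* Parse "--cryptodev_mask 0x5" style input; 0 or trailing junk is invalid. */
static int
parse_mask(const char *arg, uint64_t *mask)
{
        char *end = NULL;
        uint64_t m = strtoull(arg, &end, 16);

        if (end == NULL || *end != '\0' || m == 0)
                return -1;
        *mask = m;
        return 0;
}

static int
cdev_enabled(uint64_t mask, uint8_t cdev_id)
{
        return (mask & (1ULL << cdev_id)) != 0;
}
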
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index dd9201b2..e6e6c228 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -126,7 +126,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -709,7 +709,7 @@ l2fwd_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 60cccdb1..37453483 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -44,6 +44,7 @@
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
+#include <signal.h>
#include <rte_common.h>
#include <rte_log.h>
@@ -116,7 +117,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -142,6 +143,15 @@ static int64_t check_period = 5; /* default check cycle is 5ms */
/* Keepalive structure */
struct rte_keepalive *rte_global_keepalive_info;
+/* Termination signalling */
+static int terminate_signal_received;
+
+/* Termination signal handler */
+static void handle_sigterm(__rte_unused int value)
+{
+ terminate_signal_received = 1;
+}
+
/* Print out statistics on packets dropped */
static void
print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
@@ -251,7 +261,7 @@ l2fwd_main_loop(void)
uint64_t tsc_initial = rte_rdtsc();
uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();
- while (1) {
+ while (!terminate_signal_received) {
/* Keepalive heartbeat */
rte_keepalive_mark_alive(rte_global_keepalive_info);
@@ -464,7 +474,7 @@ l2fwd_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -526,6 +536,8 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
static void
dead_core(__rte_unused void *ptr_data, const int id_core)
{
+ if (terminate_signal_received)
+ return;
printf("Dead core %i - restarting..\n", id_core);
if (rte_eal_get_lcore_state(id_core) == FINISHED) {
rte_eal_wait_lcore(id_core);
@@ -554,6 +566,16 @@ main(int argc, char **argv)
uint8_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
+ struct sigaction signal_handler;
+ struct rte_keepalive_shm *ka_shm;
+
+ memset(&signal_handler, 0, sizeof(signal_handler));
+ terminate_signal_received = 0;
+ signal_handler.sa_handler = &handle_sigterm;
+ if (sigaction(SIGINT, &signal_handler, NULL) == -1 ||
+ sigaction(SIGTERM, &signal_handler, NULL) == -1)
+ rte_exit(EXIT_FAILURE, "SIGNAL\n");
+
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -730,9 +752,8 @@ main(int argc, char **argv)
rte_timer_subsystem_init();
rte_timer_init(&stats_timer);
+ ka_shm = NULL;
if (check_period > 0) {
- struct rte_keepalive_shm *ka_shm;
-
ka_shm = rte_keepalive_shm_create();
if (ka_shm == NULL)
rte_exit(EXIT_FAILURE,
@@ -782,7 +803,7 @@ main(int argc, char **argv)
lcore_id);
}
}
- for (;;) {
+ while (!terminate_signal_received) {
rte_timer_manage();
rte_delay_ms(5);
}
@@ -792,5 +813,7 @@ main(int argc, char **argv)
return -1;
}
+ if (ka_shm != NULL)
+ rte_keepalive_shm_cleanup(ka_shm);
return 0;
}
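
The l2fwd-keepalive changes wire SIGINT/SIGTERM to a flag so the worker and
timer loops exit and the keepalive shared memory is unlinked on the way out.
A minimal sketch of that shutdown pattern, independent of the keepalive
specifics; sig_atomic_t is used here as the conventionally safe flag type:

#include <signal.h>
#include <string.h>

static volatile sig_atomic_t quit;

static void
on_signal(int sig)
{
        (void)sig;
        quit = 1; /* async-signal-safe: only set a flag */
}

static int
install_handlers(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_signal;
        if (sigaction(SIGINT, &sa, NULL) == -1 ||
            sigaction(SIGTERM, &sa, NULL) == -1)
                return -1;
        return 0;
}

/* The main loop then becomes: while (!quit) { work(); } followed by cleanup. */
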
diff --git a/examples/l2fwd-keepalive/shm.c b/examples/l2fwd-keepalive/shm.c
index 177aa5b8..fbf5bd79 100644
--- a/examples/l2fwd-keepalive/shm.c
+++ b/examples/l2fwd-keepalive/shm.c
@@ -129,3 +129,13 @@ void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
strerror(errno));
}
}
+
+void rte_keepalive_shm_cleanup(struct rte_keepalive_shm *ka_shm)
+{
+ if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
+ printf("Warning: Error unlinking %s (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME, strerror(errno));
+
+ if (ka_shm && munmap(ka_shm, sizeof(struct rte_keepalive_shm)) != 0)
+ printf("Warning: munmap() failed\n");
+}
diff --git a/examples/l2fwd-keepalive/shm.h b/examples/l2fwd-keepalive/shm.h
index 25e1b61d..66a60600 100644
--- a/examples/l2fwd-keepalive/shm.h
+++ b/examples/l2fwd-keepalive/shm.h
@@ -87,3 +87,12 @@ struct rte_keepalive_shm *rte_keepalive_shm_create(void);
void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
const int id_core, const enum rte_keepalive_state core_state,
uint64_t last_alive);
+
+/** Shutdown cleanup of shared host memory keepalive object.
+ * @param *shm
+ * Pointer to SHM keepalive structure. May be NULL.
+ *
+ * If *shm is NULL, this function will only attempt to remove the
+ * shared host memory handle and not unmap the underlying memory.
+ */
+void rte_keepalive_shm_cleanup(struct rte_keepalive_shm *ka_shm);
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index b2f58519..f9667272 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -120,7 +120,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -392,6 +392,29 @@ l2fwd_parse_timer_period(const char *q_arg)
return n;
}
+static const char short_options[] =
+ "p:" /* portmask */
+ "q:" /* number of queues */
+ "T:" /* timer period */
+ ;
+
+#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
+#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
+
+enum {
+ /* long options mapped to a short option */
+
+ /* first long-only option value must be >= 256, so that we won't
+ * conflict with short options */
+ CMD_LINE_OPT_MIN_NUM = 256,
+};
+
+static const struct option lgopts[] = {
+ { CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
+ { CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
+ {NULL, 0, 0, 0}
+};
+
/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
@@ -400,15 +423,10 @@ l2fwd_parse_args(int argc, char **argv)
char **argvopt;
int option_index;
char *prgname = argv[0];
- static struct option lgopts[] = {
- { "mac-updating", no_argument, &mac_updating, 1},
- { "no-mac-updating", no_argument, &mac_updating, 0},
- {NULL, 0, 0, 0}
- };
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+ while ((opt = getopt_long(argc, argvopt, short_options,
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -457,7 +475,7 @@ l2fwd_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
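
The l2fwd rework above hoists the option tables to file scope; the l3fwd
rework later in this patch additionally maps each long option to an enum
value starting at 256 so getopt_long() returns it straight into the switch.
A minimal sketch of that dispatch, with hypothetical option names:

#include <getopt.h>
#include <stdio.h>

enum {
        /* values >= 256 cannot collide with single-character options */
        OPT_FOO_NUM = 256,
        OPT_BAR_NUM,
};

static const struct option lgopts[] = {
        { "foo", required_argument, 0, OPT_FOO_NUM }, /* hypothetical */
        { "bar", no_argument,       0, OPT_BAR_NUM }, /* hypothetical */
        { NULL, 0, 0, 0 }
};

static int
parse(int argc, char **argv)
{
        int opt;

        while ((opt = getopt_long(argc, argv, "p:", lgopts, NULL)) != EOF) {
                switch (opt) {
                case 'p':
                        printf("portmask %s\n", optarg);
                        break;
                case OPT_FOO_NUM:
                        printf("foo = %s\n", optarg);
                        break;
                case OPT_BAR_NUM:
                        printf("bar set\n");
                        break;
                default:
                        return -1;
                }
        }
        return 0;
}
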
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 3cfbb40e..ea0b5b1e 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -163,7 +163,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -1776,7 +1776,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index b65d683c..9d57fdef 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -147,7 +147,7 @@
/*
* Configurable number of RX/TX ring descriptors
*/
-#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_RX_DESC_DEFAULT 512
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -164,6 +164,8 @@ static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0;
/* NUMA is enabled by default. */
static int numa_on = 1;
+static int parse_ptype; /**< Parse packet type using rx callback; */
+ /**< disabled by default */
enum freq_scale_hint_t
{
@@ -221,7 +223,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -377,6 +379,7 @@ static void
signal_exit_now(int sigtype)
{
unsigned lcore_id;
+ unsigned int portid, nb_ports;
int ret;
if (sigtype == SIGINT) {
@@ -391,6 +394,15 @@ signal_exit_now(int sigtype)
"library de-initialization failed on "
"core%u\n", lcore_id);
}
+
+ nb_ports = rte_eth_dev_count();
+ for (portid = 0; portid < nb_ports; portid++) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ rte_eth_dev_stop(portid);
+ rte_eth_dev_close(portid);
+ }
}
rte_exit(EXIT_SUCCESS, "User forced exit\n");
@@ -607,6 +619,48 @@ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
#endif
static inline void
+parse_ptype_one(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ uint16_t ether_type;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_type = eth_hdr->ether_type;
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ unsigned int i;
+
+ for (i = 0; i < nb_pkts; ++i)
+ parse_ptype_one(pkts[i]);
+
+ return nb_pkts;
+}
+
+static int
+add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
+{
+ printf("Port %d: softly parse packet type info\n", portid);
+ if (rte_eth_add_rx_callback(portid, queueid, cb_parse_ptype, NULL))
+ return 0;
+
+ printf("Failed to add rx callback: port=%d\n", portid);
+ return -1;
+}
+
+static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
struct lcore_conf *qconf)
{
@@ -1108,7 +1162,8 @@ print_usage(const char *prgname)
" --config (port,queue,lcore): rx queues configuration\n"
" --no-numa: optional, disable numa awareness\n"
" --enable-jumbo: enable jumbo frame"
- " which max packet len is PKTLEN in decimal (64-9600)\n",
+ " which max packet len is PKTLEN in decimal (64-9600)\n"
+ " --parse-ptype: parse packet type by software\n",
prgname);
}
@@ -1202,6 +1257,8 @@ parse_config(const char *q_arg)
return 0;
}
+#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
@@ -1214,6 +1271,7 @@ parse_args(int argc, char **argv)
{"config", 1, 0, 0},
{"no-numa", 0, 0, 0},
{"enable-jumbo", 0, 0, 0},
+ {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{NULL, 0, 0, 0}
};
@@ -1284,6 +1342,13 @@ parse_args(int argc, char **argv)
(unsigned int)port_conf.rxmode.max_rx_pkt_len);
}
+ if (!strncmp(lgopts[option_index].name,
+ CMD_LINE_OPT_PARSE_PTYPE,
+ sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
+ printf("soft parse-ptype is enabled\n");
+ parse_ptype = 1;
+ }
+
break;
default:
@@ -1296,7 +1361,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -1531,6 +1596,50 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
}
+static int check_ptype(uint8_t portid)
+{
+ int i, ret;
+ int ptype_l3_ipv4 = 0;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ int ptype_l3_ipv6 = 0;
+#endif
+ uint32_t ptype_mask = RTE_PTYPE_L3_MASK;
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
+ if (ret <= 0)
+ return 0;
+
+ uint32_t ptypes[ret];
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
+ for (i = 0; i < ret; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ ptype_l3_ipv4 = 1;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ ptype_l3_ipv6 = 1;
+#endif
+ }
+
+ if (ptype_l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ if (ptype_l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+ if (ptype_l3_ipv4)
+#else /* APP_LOOKUP_EXACT_MATCH */
+ if (ptype_l3_ipv4 && ptype_l3_ipv6)
+#endif
+ return 1;
+
+ return 0;
+
+}
+
int
main(int argc, char **argv)
{
@@ -1545,6 +1654,7 @@ main(int argc, char **argv)
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
uint8_t portid, nb_rx_queue, queue, socketid;
+ uint16_t org_rxq_intr = port_conf.intr_conf.rxq;
/* catch SIGINT and restore cpufreq governor to ondemand */
signal(SIGINT, signal_exit_now);
@@ -1605,8 +1715,13 @@ main(int argc, char **argv)
n_tx_queue = dev_txq_num;
printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
nb_rx_queue, (unsigned)n_tx_queue );
+ /* If the number of Rx queues is 0, there is no need to enable Rx interrupts */
+ if (nb_rx_queue == 0)
+ port_conf.intr_conf.rxq = 0;
ret = rte_eth_dev_configure(portid, nb_rx_queue,
(uint16_t)n_tx_queue, &port_conf);
+ /* Revert to original value */
+ port_conf.intr_conf.rxq = org_rxq_intr;
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
@@ -1716,6 +1831,14 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ if (parse_ptype) {
+ if (add_cb_parse_ptype(portid, queueid) < 0)
+ rte_exit(EXIT_FAILURE,
+ "Fail to add ptype cb\n");
+ } else if (!check_ptype(portid))
+ rte_exit(EXIT_FAILURE,
+ "PMD can not provide needed ptypes\n");
}
}
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index f56e8db9..797f722a 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -197,7 +197,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -816,7 +816,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/l3fwd/l3fwd_lpm.h b/examples/l3fwd/l3fwd_lpm.h
index a43c5070..258a82fe 100644
--- a/examples/l3fwd/l3fwd_lpm.h
+++ b/examples/l3fwd/l3fwd_lpm.h
@@ -49,7 +49,7 @@ lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
static inline uint8_t
lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
{
- uint8_t next_hop;
+ uint32_t next_hop;
struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
(struct rte_lpm6 *)lookup_struct;
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index 538fe3d7..aa06b6d3 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -40,8 +40,7 @@ static inline __attribute__((always_inline)) uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint8_t portid)
{
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6;
+ uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct ipv4_hdr *ipv4_hdr;
struct ether_hdr *eth_hdr;
@@ -51,9 +50,11 @@ lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
- return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
- rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop_ipv4) == 0) ?
- next_hop_ipv4 : portid);
+ return (uint16_t) (
+ (rte_lpm_lookup(qconf->ipv4_lookup_struct,
+ rte_be_to_cpu_32(ipv4_hdr->dst_addr),
+ &next_hop) == 0) ?
+ next_hop : portid);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
@@ -61,8 +62,8 @@ lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) == 0)
- ? next_hop_ipv6 : portid);
+ ipv6_hdr->dst_addr, &next_hop) == 0)
+ ? next_hop : portid);
}
@@ -78,14 +79,13 @@ static inline __attribute__((always_inline)) uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint32_t dst_ipv4, uint8_t portid)
{
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6;
+ uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
- &next_hop_ipv4) == 0) ? next_hop_ipv4 : portid);
+ &next_hop) == 0) ? next_hop : portid);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
@@ -93,8 +93,8 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) == 0)
- ? next_hop_ipv6 : portid);
+ ipv6_hdr->dst_addr, &next_hop) == 0)
+ ? next_hop : portid);
}
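
The l3fwd, ip_fragmentation and ip_reassembly hunks all track the same API
change: rte_lpm6_lookup() now writes a uint32_t next hop instead of uint8_t,
so a single next_hop variable serves both address families. A minimal sketch
of the lookups, assuming lpm/lpm6 tables populated elsewhere:

#include <rte_lpm.h>
#include <rte_lpm6.h>

static uint16_t
route4(struct rte_lpm *lpm, uint32_t dst_ip, uint16_t fallback)
{
        uint32_t next_hop;

        return (rte_lpm_lookup(lpm, dst_ip, &next_hop) == 0) ?
                (uint16_t)next_hop : fallback;
}

static uint16_t
route6(struct rte_lpm6 *lpm6, uint8_t dst_ip[16], uint16_t fallback)
{
        uint32_t next_hop; /* was uint8_t before 17.05 */

        return (rte_lpm6_lookup(lpm6, dst_ip, &next_hop) == 0) ?
                (uint16_t)next_hop : fallback;
}
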
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 7223e773..fd6605bf 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -156,7 +156,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -474,6 +474,13 @@ parse_eth_dest(const char *optarg)
#define MAX_JUMBO_PKT_LEN 9600
#define MEMPOOL_CACHE_SIZE 256
+static const char short_options[] =
+ "p:" /* portmask */
+ "P" /* promiscuous */
+ "L" /* enable long prefix match */
+ "E" /* enable exact match */
+ ;
+
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
@@ -481,6 +488,31 @@ parse_eth_dest(const char *optarg)
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+enum {
+ /* long options mapped to a short option */
+
+ /* first long-only option value must be >= 256, so that we won't
+ * conflict with short options */
+ CMD_LINE_OPT_MIN_NUM = 256,
+ CMD_LINE_OPT_CONFIG_NUM,
+ CMD_LINE_OPT_ETH_DEST_NUM,
+ CMD_LINE_OPT_NO_NUMA_NUM,
+ CMD_LINE_OPT_IPV6_NUM,
+ CMD_LINE_OPT_ENABLE_JUMBO_NUM,
+ CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
+ CMD_LINE_OPT_PARSE_PTYPE_NUM,
+};
+
+static const struct option lgopts[] = {
+ {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
+ {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
+ {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
+ {CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
+ {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
+ {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
+ {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
+ {NULL, 0, 0, 0}
+};
/*
* This expression is used to calculate the number of mbufs needed
@@ -504,16 +536,6 @@ parse_args(int argc, char **argv)
char **argvopt;
int option_index;
char *prgname = argv[0];
- static struct option lgopts[] = {
- {CMD_LINE_OPT_CONFIG, 1, 0, 0},
- {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
- {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
- {CMD_LINE_OPT_IPV6, 0, 0, 0},
- {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
- {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
- {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
- {NULL, 0, 0, 0}
- };
argvopt = argv;
@@ -534,7 +556,7 @@ parse_args(int argc, char **argv)
"L3FWD: LPM and EM are mutually exclusive, select only one";
const char *str13 = "L3FWD: LPM or EM none selected, default LPM on";
- while ((opt = getopt_long(argc, argvopt, "p:PLE",
+ while ((opt = getopt_long(argc, argvopt, short_options,
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -547,6 +569,7 @@ parse_args(int argc, char **argv)
return -1;
}
break;
+
case 'P':
printf("%s\n", str2);
promiscuous_on = 1;
@@ -563,89 +586,71 @@ parse_args(int argc, char **argv)
break;
/* long options */
- case 0:
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_CONFIG,
- sizeof(CMD_LINE_OPT_CONFIG))) {
-
- ret = parse_config(optarg);
- if (ret) {
- printf("%s\n", str5);
- print_usage(prgname);
- return -1;
- }
- }
-
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_ETH_DEST,
- sizeof(CMD_LINE_OPT_ETH_DEST))) {
- parse_eth_dest(optarg);
- }
-
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_NO_NUMA,
- sizeof(CMD_LINE_OPT_NO_NUMA))) {
- printf("%s\n", str6);
- numa_on = 0;
+ case CMD_LINE_OPT_CONFIG_NUM:
+ ret = parse_config(optarg);
+ if (ret) {
+ printf("%s\n", str5);
+ print_usage(prgname);
+ return -1;
}
+ break;
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_IPV6,
- sizeof(CMD_LINE_OPT_IPV6))) {
- printf("%sn", str7);
- ipv6 = 1;
- }
+ case CMD_LINE_OPT_ETH_DEST_NUM:
+ parse_eth_dest(optarg);
+ break;
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_ENABLE_JUMBO,
- sizeof(CMD_LINE_OPT_ENABLE_JUMBO))) {
- struct option lenopts = {
- "max-pkt-len", required_argument, 0, 0
- };
-
- printf("%s\n", str8);
- port_conf.rxmode.jumbo_frame = 1;
-
- /*
- * if no max-pkt-len set, use the default
- * value ETHER_MAX_LEN.
- */
- if (0 == getopt_long(argc, argvopt, "",
- &lenopts, &option_index)) {
- ret = parse_max_pkt_len(optarg);
- if ((ret < 64) ||
- (ret > MAX_JUMBO_PKT_LEN)) {
- printf("%s\n", str9);
- print_usage(prgname);
- return -1;
- }
- port_conf.rxmode.max_rx_pkt_len = ret;
- }
- printf("%s %u\n", str10,
- (unsigned int)port_conf.rxmode.max_rx_pkt_len);
- }
+ case CMD_LINE_OPT_NO_NUMA_NUM:
+ printf("%s\n", str6);
+ numa_on = 0;
+ break;
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_HASH_ENTRY_NUM,
- sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
+ case CMD_LINE_OPT_IPV6_NUM:
+ printf("%sn", str7);
+ ipv6 = 1;
+ break;
- ret = parse_hash_entry_number(optarg);
- if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
- hash_entry_number = ret;
- } else {
- printf("%s\n", str11);
+ case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
+ struct option lenopts = {
+ "max-pkt-len", required_argument, 0, 0
+ };
+
+ printf("%s\n", str8);
+ port_conf.rxmode.jumbo_frame = 1;
+
+ /*
+ * if no max-pkt-len set, use the default
+ * value ETHER_MAX_LEN.
+ */
+ if (getopt_long(argc, argvopt, "",
+ &lenopts, &option_index) == 0) {
+ ret = parse_max_pkt_len(optarg);
+ if ((ret < 64) ||
+ (ret > MAX_JUMBO_PKT_LEN)) {
+ printf("%s\n", str9);
print_usage(prgname);
return -1;
}
+ port_conf.rxmode.max_rx_pkt_len = ret;
}
+ printf("%s %u\n", str10,
+ (unsigned int)port_conf.rxmode.max_rx_pkt_len);
+ break;
+ }
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_PARSE_PTYPE,
- sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
- printf("soft parse-ptype is enabled\n");
- parse_ptype = 1;
+ case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
+ ret = parse_hash_entry_number(optarg);
+ if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
+ hash_entry_number = ret;
+ } else {
+ printf("%s\n", str11);
+ print_usage(prgname);
+ return -1;
}
+ break;
+ case CMD_LINE_OPT_PARSE_PTYPE_NUM:
+ printf("soft parse-ptype is enabled\n");
+ parse_ptype = 1;
break;
default:
@@ -683,7 +688,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 14a038b7..25da28eb 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -116,7 +116,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -451,7 +451,7 @@ lsi_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c
index 157fd528..07f92a1a 100644
--- a/examples/load_balancer/config.c
+++ b/examples/load_balancer/config.c
@@ -758,7 +758,7 @@ app_parse_args(int argc, char **argv)
argv[optind - 1] = prgname;
ret = optind - 1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
index e07850be..abd05a31 100644
--- a/examples/load_balancer/init.c
+++ b/examples/load_balancer/init.c
@@ -81,7 +81,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325d..7f918aa4 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -144,9 +144,10 @@ app_lcore_io_rx_buffer_to_send (
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- bsz);
+ bsz,
+ NULL);
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -310,9 +311,10 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- lp->rx.mbuf_out[worker].n_mbufs);
+ lp->rx.mbuf_out[worker].n_mbufs,
+ NULL);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -347,11 +349,11 @@ app_lcore_io_tx(
ret = rte_ring_sc_dequeue_bulk(
ring,
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
- bsz_rd);
+ bsz_rd,
+ NULL);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
n_mbufs += bsz_rd;
@@ -418,10 +420,12 @@ static inline void
app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
{
uint8_t port;
+ uint32_t i;
- for (port = 0; port < lp->tx.n_nic_ports; port ++) {
+ for (i = 0; i < lp->tx.n_nic_ports; i++) {
uint32_t n_pkts;
+ port = lp->tx.nic_ports[i];
if (likely((lp->tx.mbuf_out_flush[port] == 0) ||
(lp->tx.mbuf_out[port].n_mbufs == 0))) {
lp->tx.mbuf_out_flush[port] = 1;
@@ -503,11 +507,11 @@ app_lcore_worker(
ret = rte_ring_sc_dequeue_bulk(
ring_in,
(void **) lp->mbuf_in.array,
- bsz_rd);
+ bsz_rd,
+ NULL);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -555,11 +559,12 @@ app_lcore_worker(
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- bsz_wr);
+ bsz_wr,
+ NULL);
#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +577,7 @@ app_lcore_worker(
}
#endif
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -607,9 +612,10 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- lp->mbuf_out[port].n_mbufs);
+ lp->mbuf_out[port].n_mbufs,
+ NULL);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
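
The churn in this file tracks the rte_ring API rework in 17.05: the bulk enqueue/dequeue calls now return the number of objects processed (all or none, so 0 on failure) instead of 0, -ENOBUFS or -ENOENT, and they take a final pointer that, when non-NULL, receives the ring's remaining free space (enqueue) or remaining entries (dequeue). A minimal sketch of the new failure handling, mirroring what runtime.c does above:

    #include <rte_ring.h>
    #include <rte_mbuf.h>

    /* All-or-nothing: on failure nothing was enqueued, so free the batch. */
    static void
    enqueue_or_drop(struct rte_ring *r, struct rte_mbuf **mbufs, unsigned int n)
    {
        if (rte_ring_sp_enqueue_bulk(r, (void **)mbufs, n, NULL) == 0) {
            unsigned int k;

            for (k = 0; k < n; k++)
                rte_pktmbuf_free(mbufs[k]);
        }
    }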
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index d4f9ca37..01b535c2 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -276,14 +276,11 @@ main(int argc, char *argv[])
printf("[Press Ctrl-C to quit ...]\n");
for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;
- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
+ PKT_READ_SIZE, NULL);
if (unlikely(rx_pkts == 0)){
if (need_flush)
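
Unlike the all-or-nothing bulk calls, rte_ring_dequeue_burst() takes whatever is available up to the requested count and returns that number, which is why the old "try a full bulk, then shrink to rte_ring_count()" loop collapses into a single call here. A sketch, reusing rx_ring and PKT_READ_SIZE from this example:

    void *pkts[PKT_READ_SIZE];
    uint16_t rx_pkts;

    /* Returns 0..PKT_READ_SIZE packets in one call. */
    rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE, NULL);
    if (rx_pkts == 0) {
        /* ring empty: flush any buffered TX, then poll again */
    }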
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index a6dc12d5..c2b0261d 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)
cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count, NULL) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index 2d951d93..d922522f 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -77,8 +77,7 @@
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
#define MBUF_NAME "mbuf_pool_%d"
-#define MBUF_SIZE \
-(RTE_MBUF_DEFAULT_DATAROOM + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define NB_MBUF 8192
#define RING_MASTER_NAME "l2fwd_ring_m2s_"
#define RING_SLAVE_NAME "l2fwd_ring_s2m_"
@@ -163,7 +162,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -672,6 +671,8 @@ l2fwd_main_loop(void)
port_statistics[portid].tx += sent;
}
+
+ prev_tsc = cur_tsc;
}
/*
@@ -865,7 +866,7 @@ l2fwd_parse_args(int argc, char **argv)
return -1;
}
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -989,14 +990,10 @@ main(int argc, char **argv)
flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
l2fwd_pktmbuf_pool[portid] =
- rte_mempool_create(buf_name, NB_MBUF,
- MBUF_SIZE, 32,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), flags);
+ rte_pktmbuf_pool_create(buf_name, NB_MBUF, 32,
+ 0, MBUF_DATA_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool[portid] == NULL)
- rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
printf("Create mbuf %s\n", buf_name);
}
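
rte_pktmbuf_pool_create() bundles the element size, private-data size and both init callbacks that the removed rte_mempool_create() call spelled out by hand; note that it accepts no MEMPOOL_F_* flags, so the flags value computed above is no longer consumed here. A sketch of the equivalent call:

    /* 32 = per-lcore cache size, 0 = application private area size. */
    struct rte_mempool *pool = rte_pktmbuf_pool_create("mbuf_pool_0",
            NB_MBUF, 32, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

    if (pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");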
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index d30ff4a4..0990d965 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -193,7 +193,7 @@ smp_parse_args(int argc, char **argv)
ports[num_ports++] = (uint8_t)i;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -213,7 +213,7 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/netmap_compat/bridge/Makefile b/examples/netmap_compat/bridge/Makefile
index 50d96e81..1d4ddfff 100644
--- a/examples/netmap_compat/bridge/Makefile
+++ b/examples/netmap_compat/bridge/Makefile
@@ -42,6 +42,7 @@ ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
+clean:
else
# binary name
diff --git a/examples/netmap_compat/bridge/bridge.c b/examples/netmap_compat/bridge/bridge.c
index 53f5fdb6..2f2b6baa 100644
--- a/examples/netmap_compat/bridge/bridge.c
+++ b/examples/netmap_compat/bridge/bridge.c
@@ -59,7 +59,7 @@ struct rte_eth_conf eth_conf = {
.hw_ip_checksum = 0,
.hw_vlan_filter = 0,
.jumbo_frame = 0,
- .hw_strip_crc = 0,
+ .hw_strip_crc = 1,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 3c88b86e..49ae35b8 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -216,7 +216,7 @@ parse_args(int argc, char **argv)
}
argv[optind-1] = prgname;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return 0;
}
@@ -229,7 +229,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
/* free the mbufs which failed from transmit */
app_stats.tx.ro_tx_failed_pkts += count;
- RTE_LOG(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
pktmbuf_free_bulk(unsent, count);
}
@@ -410,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
pkts, MAX_PKTS_BURST);
if (nb_rx_pkts == 0) {
- RTE_LOG(DEBUG, REORDERAPP,
+ RTE_LOG_DP(DEBUG, REORDERAPP,
"%s():Received zero packets\n", __func__);
continue;
}
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
pkts[i++]->seqn = seqn++;
/* enqueue to rx_to_workers ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
- nb_rx_pkts);
+ ret = rte_ring_enqueue_burst(ring_out,
+ (void *)pkts, nb_rx_pkts, NULL);
app_stats.rx.enqueue_pkts += ret;
if (unlikely(ret < nb_rx_pkts)) {
app_stats.rx.enqueue_failed_pkts +=
@@ -462,7 +462,7 @@ worker_thread(void *args_ptr)
/* dequeue the mbufs from rx_to_workers ring */
burst_size = rte_ring_dequeue_burst(ring_in,
- (void *)burst_buffer, MAX_PKTS_BURST);
+ (void *)burst_buffer, MAX_PKTS_BURST, NULL);
if (unlikely(burst_size == 0))
continue;
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
burst_buffer[i++]->port ^= xor_val;
/* enqueue the modified mbufs to workers_to_tx ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+ burst_size, NULL);
__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
if (unlikely(ret < burst_size)) {
/* Return the mbufs to their respective pool, dropping packets */
@@ -509,7 +510,7 @@ send_thread(struct send_thread_args *args)
/* deque the mbufs from workers_to_tx ring */
nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);
if (unlikely(nb_dq_mbufs == 0))
continue;
@@ -522,7 +523,7 @@ send_thread(struct send_thread_args *args)
if (ret == -1 && rte_errno == ERANGE) {
/* Too early pkts should be transmitted out directly */
- RTE_LOG(DEBUG, REORDERAPP,
+ RTE_LOG_DP(DEBUG, REORDERAPP,
"%s():Cannot reorder early packet "
"direct enqueuing to TX\n", __func__);
outp = mbufs[i]->port;
@@ -594,7 +595,7 @@ tx_thread(struct rte_ring *ring_in)
/* deque the mbufs from workers_to_tx ring */
dqnum = rte_ring_dequeue_burst(ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);
if (unlikely(dqnum == 0))
continue;
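
RTE_LOG_DP() marks a data-path log point: unlike RTE_LOG(), it compiles to nothing when the message level is above the build-time RTE_LOG_DP_LEVEL (CONFIG_RTE_LOG_DP_LEVEL, RTE_LOG_INFO by default), so these per-burst DEBUG messages cost nothing in a default build. A usage sketch, with a hypothetical count variable:

    /* Compiled out entirely unless RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG. */
    RTE_LOG_DP(DEBUG, REORDERAPP, "%s(): dropped %u packets\n",
            __func__, count);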
diff --git a/examples/performance-thread/common/arch/x86/ctx.h b/examples/performance-thread/common/arch/x86/ctx.h
index 03860508..a41ce05a 100644
--- a/examples/performance-thread/common/arch/x86/ctx.h
+++ b/examples/performance-thread/common/arch/x86/ctx.h
@@ -35,6 +35,10 @@
#ifndef CTX_H
#define CTX_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
* CPU context registers
*/
@@ -54,4 +58,8 @@ void
ctx_switch(struct ctx *new_ctx, struct ctx *curr_ctx);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* RTE_CTX_H_ */
diff --git a/examples/performance-thread/common/common.mk b/examples/performance-thread/common/common.mk
index d3de5fc6..f6cab771 100644
--- a/examples/performance-thread/common/common.mk
+++ b/examples/performance-thread/common/common.mk
@@ -30,13 +30,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# list the C files belonhing to the lthread subsystem, these are common to all lthread apps
-SRCS-y += ../common/lthread.c \
- ../common/lthread_sched.c \
- ../common/lthread_cond.c \
- ../common/lthread_tls.c \
- ../common/lthread_mutex.c \
- ../common/lthread_diag.c \
- ../common/arch/x86/ctx.c
+# list the C files belonging to the lthread subsystem; these are common to all
+# lthread apps. Any makefile including this should set VPATH to include this
+# directory path
+#
+
+MKFILE_PATH=$(abspath $(dir $(lastword $(MAKEFILE_LIST))))
+
+VPATH := $(MKFILE_PATH) $(MKFILE_PATH)/arch/x86
+
+SRCS-y += lthread.c lthread_sched.c lthread_cond.c lthread_tls.c lthread_mutex.c lthread_diag.c ctx.c
-INCLUDES += -I$(RTE_SDK)/examples/performance-thread/common/ -I$(RTE_SDK)/examples/performance-thread/common/arch/x86/
+INCLUDES += -I$(MKFILE_PATH) -I$(MKFILE_PATH)/arch/x86/
diff --git a/examples/performance-thread/common/lthread.h b/examples/performance-thread/common/lthread.h
index 8c77af82..5c2c1a5f 100644
--- a/examples/performance-thread/common/lthread.h
+++ b/examples/performance-thread/common/lthread.h
@@ -62,6 +62,10 @@
#ifndef LTHREAD_H_
#define LTHREAD_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <rte_per_lcore.h>
#include "lthread_api.h"
@@ -96,4 +100,8 @@ _lthread_init(struct lthread *lt,
void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_H_ */
diff --git a/examples/performance-thread/common/lthread_api.h b/examples/performance-thread/common/lthread_api.h
index ec976103..ff245a08 100644
--- a/examples/performance-thread/common/lthread_api.h
+++ b/examples/performance-thread/common/lthread_api.h
@@ -124,6 +124,10 @@
#ifndef LTHREAD_H
#define LTHREAD_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <sys/socket.h>
#include <fcntl.h>
@@ -829,4 +833,8 @@ int lthread_cond_signal(struct lthread_cond *c);
*/
int lthread_cond_broadcast(struct lthread_cond *c);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_H */
diff --git a/examples/performance-thread/common/lthread_cond.h b/examples/performance-thread/common/lthread_cond.h
index 5bd02a7d..5e5f14be 100644
--- a/examples/performance-thread/common/lthread_cond.h
+++ b/examples/performance-thread/common/lthread_cond.h
@@ -62,6 +62,10 @@
#ifndef LTHREAD_COND_H_
#define LTHREAD_COND_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_queue.h"
#define MAX_COND_NAME_SIZE 64
@@ -74,4 +78,8 @@ struct lthread_cond {
uint64_t diag_ref; /* optional ref to user diag data */
} __rte_cache_aligned;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_COND_H_ */
diff --git a/examples/performance-thread/common/lthread_diag.h b/examples/performance-thread/common/lthread_diag.h
index 2877d311..3dce8e0e 100644
--- a/examples/performance-thread/common/lthread_diag.h
+++ b/examples/performance-thread/common/lthread_diag.h
@@ -34,6 +34,10 @@
#ifndef LTHREAD_DIAG_H_
#define LTHREAD_DIAG_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <inttypes.h>
@@ -129,4 +133,9 @@ extern uint64_t diag_mask;
#define DIAG_USED __rte_unused
#endif /* LTHREAD_DIAG */
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_DIAG_H_ */
diff --git a/examples/performance-thread/common/lthread_diag_api.h b/examples/performance-thread/common/lthread_diag_api.h
index 7ee514f8..2fda0951 100644
--- a/examples/performance-thread/common/lthread_diag_api.h
+++ b/examples/performance-thread/common/lthread_diag_api.h
@@ -33,6 +33,10 @@
#ifndef LTHREAD_DIAG_API_H_
#define LTHREAD_DIAG_API_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <inttypes.h>
@@ -322,4 +326,8 @@ lthread_cond_diag_ref(struct lthread_cond *c);
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_DIAG_API_H_ */
diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h
index 031d8afc..3f7fb92d 100644
--- a/examples/performance-thread/common/lthread_int.h
+++ b/examples/performance-thread/common/lthread_int.h
@@ -62,6 +62,10 @@
#include <lthread_api.h>
#define LTHREAD_INT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -197,4 +201,8 @@ struct lthread {
uint64_t diag_ref; /* ref to user diag data */
} __rte_cache_aligned;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_INT_H */
diff --git a/examples/performance-thread/common/lthread_mutex.h b/examples/performance-thread/common/lthread_mutex.h
index 4d30b2e7..e78db91d 100644
--- a/examples/performance-thread/common/lthread_mutex.h
+++ b/examples/performance-thread/common/lthread_mutex.h
@@ -35,6 +35,10 @@
#ifndef LTHREAD_MUTEX_H_
#define LTHREAD_MUTEX_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_queue.h"
@@ -49,4 +53,8 @@ struct lthread_mutex {
uint64_t diag_ref; /* optional ref to user diag data */
} __rte_cache_aligned;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_MUTEX_H_ */
diff --git a/examples/performance-thread/common/lthread_objcache.h b/examples/performance-thread/common/lthread_objcache.h
index d7e35825..6e5195ba 100644
--- a/examples/performance-thread/common/lthread_objcache.h
+++ b/examples/performance-thread/common/lthread_objcache.h
@@ -33,6 +33,10 @@
#ifndef LTHREAD_OBJCACHE_H_
#define LTHREAD_OBJCACHE_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <string.h>
#include <rte_per_lcore.h>
@@ -154,5 +158,8 @@ _lthread_objcache_free(struct lthread_objcache *c, void *obj)
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_OBJCACHE_H_ */
diff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h
index 27680eab..fb0c578b 100644
--- a/examples/performance-thread/common/lthread_pool.h
+++ b/examples/performance-thread/common/lthread_pool.h
@@ -69,6 +69,10 @@
#ifndef LTHREAD_POOL_H_
#define LTHREAD_POOL_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <rte_malloc.h>
#include <rte_per_lcore.h>
#include <rte_log.h>
@@ -328,5 +332,8 @@ _qnode_pool_destroy(struct qnode_pool *p)
return 0;
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_POOL_H_ */
diff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h
index 2c55fcec..4fc2074e 100644
--- a/examples/performance-thread/common/lthread_queue.h
+++ b/examples/performance-thread/common/lthread_queue.h
@@ -69,6 +69,10 @@
#ifndef LTHREAD_QUEUE_H_
#define LTHREAD_QUEUE_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <string.h>
#include <rte_prefetch.h>
@@ -298,5 +302,8 @@ _lthread_queue_remove(struct lthread_queue *q)
return NULL;
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_QUEUE_H_ */
diff --git a/examples/performance-thread/common/lthread_sched.h b/examples/performance-thread/common/lthread_sched.h
index 4ce56c27..7cddda9c 100644
--- a/examples/performance-thread/common/lthread_sched.h
+++ b/examples/performance-thread/common/lthread_sched.h
@@ -62,6 +62,10 @@
#ifndef LTHREAD_SCHED_H_
#define LTHREAD_SCHED_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_int.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
@@ -148,5 +152,8 @@ extern struct lthread_sched *schedcore[];
void _sched_timer_cb(struct rte_timer *tim, void *arg);
void _sched_shutdown(__rte_unused void *arg);
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_SCHED_H_ */
diff --git a/examples/performance-thread/common/lthread_timer.h b/examples/performance-thread/common/lthread_timer.h
index b5e6fb0e..7c03d673 100644
--- a/examples/performance-thread/common/lthread_timer.h
+++ b/examples/performance-thread/common/lthread_timer.h
@@ -35,6 +35,10 @@
#ifndef LTHREAD_TIMER_H_
#define LTHREAD_TIMER_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_int.h"
#include "lthread_sched.h"
@@ -42,11 +46,22 @@
static inline uint64_t
_ns_to_clks(uint64_t ns)
{
- unsigned __int128 clkns = rte_get_tsc_hz();
+ /*
+ * clkns needs to be divided by 1E9 to get ns clocks. However,
+ * dividing by this first would lose a lot of accuracy.
+ * Dividing after a multiply by ns, could cause overflow of
+ * uint64_t if ns is about 5 seconds [if we assume a max tsc
+ * rate of 4GHz]. Therefore we first divide by 1E4, then
+ * multiply and finally divide by 1E5. This allows ns to be
+ * values many hours long, without overflow, while still keeping
+ * reasonable accuracy.
+ */
+ uint64_t clkns = rte_get_tsc_hz() / 1e4;
clkns *= ns;
- clkns /= 1000000000;
- return (uint64_t) clkns;
+ clkns /= 1e5;
+
+ return clkns;
}
@@ -75,5 +90,8 @@ _timer_stop(struct lthread *lt)
}
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_TIMER_H_ */
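
The reworked _ns_to_clks() trades the 128-bit multiply for staged 64-bit arithmetic: hz * ns overflows uint64_t once ns exceeds UINT64_MAX / hz (about 4.6 seconds at a 4 GHz TSC), so dividing hz by 1e4 before the multiply and by 1e5 afterwards pushes that bound out by four orders of magnitude, at the cost of truncating hz to 10 kHz granularity. The bounds, as a sketch assuming a 4 GHz TSC:

    #include <stdint.h>

    uint64_t hz = 4000000000ULL;                        /* assumed 4 GHz */
    uint64_t naive_max_ns  = UINT64_MAX / hz;           /* ~4.6e9 ns, ~4.6 s */
    uint64_t staged_max_ns = UINT64_MAX / (hz / 10000); /* ~4.6e13 ns, ~12.8 h */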
diff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c
index 6876f831..47505f2d 100644
--- a/examples/performance-thread/common/lthread_tls.c
+++ b/examples/performance-thread/common/lthread_tls.c
@@ -42,7 +42,6 @@
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>
-#include <execinfo.h>
#include <sched.h>
#include <rte_malloc.h>
diff --git a/examples/performance-thread/common/lthread_tls.h b/examples/performance-thread/common/lthread_tls.h
index 86cbfadc..fff3c0db 100644
--- a/examples/performance-thread/common/lthread_tls.h
+++ b/examples/performance-thread/common/lthread_tls.h
@@ -34,6 +34,10 @@
#ifndef LTHREAD_TLS_H_
#define LTHREAD_TLS_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_api.h"
#define RTE_PER_LTHREAD_SECTION_SIZE \
@@ -53,5 +57,8 @@ void _lthread_tls_destroy(struct lthread *lt);
void _lthread_key_pool_init(void);
void _lthread_tls_alloc(struct lthread *lt);
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_TLS_H_ */
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index fdc90b28..2d98473e 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,68 @@
#define APP_LOOKUP_METHOD APP_LOOKUP_LPM
#endif
+#ifndef __GLIBC__ /* sched_getcpu() is glibc specific */
+#define sched_getcpu() rte_lcore_id()
+#endif
+
+static int
+check_ptype(int portid)
+{
+ int i, ret;
+ int ipv4 = 0, ipv6 = 0;
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK, NULL,
+ 0);
+ if (ret <= 0)
+ return 0;
+
+ uint32_t ptypes[ret];
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK,
+ ptypes, ret);
+ for (i = 0; i < ret; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ ipv4 = 1;
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ ipv6 = 1;
+ }
+
+ if (ipv4 && ipv6)
+ return 1;
+
+ return 0;
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ uint16_t ether_type;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_type = eth_hdr->ether_type;
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+cb_parse_ptype(__rte_unused uint8_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, __rte_unused void *user_param)
+{
+ unsigned int i;
+
+ for (i = 0; i < nb_pkts; i++)
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
/*
* When set to zero, simple forwarding path is enabled.
* When set to one, optimized forwarding path is enabled.
@@ -170,8 +232,9 @@ static __m128i val_eth[RTE_MAX_ETHPORTS];
/* mask of enabled ports */
static uint32_t enabled_port_mask;
-static int promiscuous_on; /**< $et in promiscuous mode off by default. */
+static int promiscuous_on; /**< Set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */
+static int parse_ptype_on;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static int ipv6; /**< ipv6 is false by default. */
@@ -282,7 +345,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -850,7 +913,7 @@ static inline uint8_t
get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
- uint8_t next_hop;
+ uint32_t next_hop;
return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
@@ -1337,15 +1400,14 @@ rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
static inline __attribute__((always_inline)) uint16_t
get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
{
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6;
+ uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
return (uint16_t) ((rte_lpm_lookup(
RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct, dst_ipv4,
- &next_hop_ipv4) == 0) ? next_hop_ipv4 : portid);
+ &next_hop) == 0) ? next_hop : portid);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
@@ -1354,8 +1416,8 @@ get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
return (uint16_t) ((rte_lpm6_lookup(
RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) == 0) ? next_hop_ipv6 :
- portid);
+ ipv6_hdr->dst_addr, &next_hop) == 0) ?
+ next_hop : portid);
}
@@ -2019,7 +2081,7 @@ lthread_tx_per_ring(void *dummy)
*/
SET_CPU_BUSY(tx_conf, CPU_POLL);
nb_rx = rte_ring_sc_dequeue_burst(ring, (void **)pkts_burst,
- MAX_PKT_BURST);
+ MAX_PKT_BURST, NULL);
SET_CPU_IDLE(tx_conf, CPU_POLL);
if (nb_rx > 0) {
@@ -2155,7 +2217,7 @@ lthread_rx(void *dummy)
ret = rte_ring_sp_enqueue_burst(
rx_conf->ring[worker_id],
(void **) pkts_burst,
- nb_rx);
+ nb_rx, NULL);
new_len = old_len + ret;
@@ -2323,7 +2385,7 @@ pthread_tx(void *dummy)
*/
SET_CPU_BUSY(tx_conf, CPU_POLL);
nb_rx = rte_ring_sc_dequeue_burst(tx_conf->ring,
- (void **)pkts_burst, MAX_PKT_BURST);
+ (void **)pkts_burst, MAX_PKT_BURST, NULL);
SET_CPU_IDLE(tx_conf, CPU_POLL);
if (unlikely(nb_rx == 0)) {
@@ -2395,7 +2457,7 @@ pthread_rx(void *dummy)
SET_CPU_BUSY(rx_conf, CPU_PROCESS);
worker_id = (worker_id + 1) % rx_conf->n_ring;
n = rte_ring_sp_enqueue_burst(rx_conf->ring[worker_id],
- (void **)pkts_burst, nb_rx);
+ (void **)pkts_burst, nb_rx, NULL);
if (unlikely(n != nb_rx)) {
uint32_t k;
@@ -2610,6 +2672,7 @@ print_usage(const char *prgname)
" [--rx (port,queue,lcore,thread)[,(port,queue,lcore,thread]]"
" [--tx (lcore,thread)[,(lcore,thread]]"
" [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
+ " [--parse-ptype]\n\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -P : enable promiscuous mode\n"
" --rx (port,queue,lcore,thread): rx queues configuration\n"
@@ -2621,7 +2684,8 @@ print_usage(const char *prgname)
" --enable-jumbo: enable jumbo frame"
" which max packet len is PKTLEN in decimal (64-9600)\n"
" --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n"
- " --no-lthreads: turn off lthread model\n",
+ " --no-lthreads: turn off lthread model\n"
+ " --parse-ptype: set to use software to analyze packet type\n\n",
prgname);
}
@@ -2840,6 +2904,7 @@ parse_eth_dest(const char *optarg)
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_NO_LTHREADS "no-lthreads"
+#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
/* Parse the argument given in the command line of the application */
static int
@@ -2859,6 +2924,7 @@ parse_args(int argc, char **argv)
{CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
{CMD_LINE_OPT_NO_LTHREADS, 0, 0, 0},
+ {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{NULL, 0, 0, 0}
};
@@ -2935,6 +3001,12 @@ parse_args(int argc, char **argv)
lthreads_on = 0;
}
+ if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_PARSE_PTYPE,
+ sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
+ printf("software packet type parsing enabled\n");
+ parse_ptype_on = 1;
+ }
+
if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
sizeof(CMD_LINE_OPT_ENABLE_JUMBO))) {
struct option lenopts = {"max-pkt-len", required_argument, 0,
@@ -2983,7 +3055,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -3623,6 +3695,31 @@ main(int argc, char **argv)
rte_eth_promiscuous_enable(portid);
}
+ for (i = 0; i < n_rx_thread; i++) {
+ lcore_id = rx_thread[i].conf.lcore_id;
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ /* check if hw packet type is supported */
+ for (queue = 0; queue < rx_thread[i].n_rx_queue; ++queue) {
+ portid = rx_thread[i].rx_queue_list[queue].port_id;
+ queueid = rx_thread[i].rx_queue_list[queue].queue_id;
+
+ if (parse_ptype_on) {
+ if (!rte_eth_add_rx_callback(portid, queueid,
+ cb_parse_ptype, NULL))
+ rte_exit(EXIT_FAILURE,
+ "Failed to add rx callback: "
+ "port=%d\n", portid);
+ } else if (!check_ptype(portid))
+ rte_exit(EXIT_FAILURE,
+ "Port %d cannot parse packet type.\n\n"
+ "Please add --parse-ptype to use sw "
+ "packet type analyzer.\n\n",
+ portid);
+ }
+ }
+
check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
if (lthreads_on) {
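
check_ptype() above uses the usual two-call sizing idiom: calling rte_eth_dev_get_supported_ptypes() with a NULL buffer returns how many L3 ptypes the PMD reports, and a second call fills a buffer of exactly that size. Only if both IPv4 and IPv6 appear is the NIC's pkt->packet_type trusted; otherwise the application insists on --parse-ptype so cb_parse_ptype classifies every RX burst in software. The idiom in isolation, as a sketch:

    int n = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK,
            NULL, 0);

    if (n > 0) {
        uint32_t ptypes[n];     /* VLA sized by the first call */

        n = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK,
                ptypes, n);
    }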
diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c
index f0357218..850b009d 100644
--- a/examples/performance-thread/pthread_shim/main.c
+++ b/examples/performance-thread/pthread_shim/main.c
@@ -59,6 +59,10 @@
#define DEBUG_APP 0
#define HELLOW_WORLD_MAX_LTHREADS 10
+#ifndef __GLIBC__ /* sched_getcpu() is glibc-specific */
+#define sched_getcpu() rte_lcore_id()
+#endif
+
__thread int print_count;
__thread pthread_mutex_t print_lock;
@@ -175,12 +179,12 @@ static void initial_lthread(void *args __attribute__((unused)))
* use an attribute to pass the desired lcore
*/
pthread_attr_t attr;
- cpu_set_t cpuset;
+ rte_cpuset_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(lcore, &cpuset);
pthread_attr_init(&attr);
- pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
+ pthread_attr_setaffinity_np(&attr, sizeof(rte_cpuset_t), &cpuset);
/* create the thread */
pthread_create(&tid[i], &attr, helloworld_pthread, (void *) i);
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.c b/examples/performance-thread/pthread_shim/pthread_shim.c
index 0d6100c9..113bafa0 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.c
+++ b/examples/performance-thread/pthread_shim/pthread_shim.c
@@ -48,6 +48,21 @@
#define POSIX_ERRNO(x) (x)
+/* some releases of FreeBSD 10, e.g. 10.0, don't have CPU_COUNT macro */
+#ifndef CPU_COUNT
+#define CPU_COUNT(x) __cpu_count(x)
+
+static inline unsigned int
+__cpu_count(const rte_cpuset_t *cpuset)
+{
+ unsigned int i, count = 0;
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (CPU_ISSET(i, cpuset))
+ count++;
+ return count;
+}
+#endif
+
/*
* this flag determines at run time if we override pthread
* calls and map then to equivalent lthread calls
@@ -159,7 +174,7 @@ int (*f_pthread_setschedparam)
int (*f_pthread_yield)
(void);
int (*f_pthread_setaffinity_np)
- (pthread_t thread, size_t cpusetsize, const cpu_set_t *cpuset);
+ (pthread_t thread, size_t cpusetsize, const rte_cpuset_t *cpuset);
int (*f_nanosleep)
(const struct timespec *req, struct timespec *rem);
} _sys_pthread_funcs = {
@@ -390,11 +405,11 @@ pthread_create(pthread_t *__restrict tid,
if (attr != NULL) {
/* determine CPU being requested */
- cpu_set_t cpuset;
+ rte_cpuset_t cpuset;
CPU_ZERO(&cpuset);
pthread_attr_getaffinity_np(attr,
- sizeof(cpu_set_t),
+ sizeof(rte_cpuset_t),
&cpuset);
if (CPU_COUNT(&cpuset) != 1)
@@ -576,15 +591,26 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *a)
return _sys_pthread_funcs.f_pthread_rwlock_wrlock(a);
}
-int pthread_yield(void)
+#ifdef RTE_EXEC_ENV_LINUXAPP
+int
+pthread_yield(void)
{
if (override) {
lthread_yield();
return 0;
}
return _sys_pthread_funcs.f_pthread_yield();
-
}
+#else
+void
+pthread_yield(void)
+{
+ if (override)
+ lthread_yield();
+ else
+ _sys_pthread_funcs.f_pthread_yield();
+}
+#endif
pthread_t pthread_self(void)
{
@@ -686,7 +712,7 @@ int nanosleep(const struct timespec *req, struct timespec *rem)
int
pthread_setaffinity_np(pthread_t thread, size_t cpusetsize,
- const cpu_set_t *cpuset)
+ const rte_cpuset_t *cpuset)
{
if (override) {
/* we only allow affinity with a single CPU */
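
rte_cpuset_t, pulled in through rte_lcore.h, maps to cpu_set_t on Linux and cpuset_t on FreeBSD, which is why the shim's affinity plumbing switches types and why some FreeBSD 10 headers need the CPU_COUNT fallback above. A portable pinning sketch, assuming an lcore variable:

    pthread_attr_t attr;
    rte_cpuset_t cpuset;

    CPU_ZERO(&cpuset);
    CPU_SET(lcore, &cpuset);
    pthread_attr_init(&attr);
    pthread_attr_setaffinity_np(&attr, sizeof(rte_cpuset_t), &cpuset);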
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.h b/examples/performance-thread/pthread_shim/pthread_shim.h
index 78bbb5ac..10f87894 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.h
+++ b/examples/performance-thread/pthread_shim/pthread_shim.h
@@ -33,7 +33,8 @@
#ifndef _PTHREAD_SHIM_H_
#define _PTHREAD_SHIM_H_
-#include <pthread.h>
+
+#include <rte_lcore.h>
/*
* This pthread shim is an example that demonstrates how legacy code
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 0af4f3b6..a80961d3 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -708,7 +708,7 @@ ptp_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
- optind = 0; /* Reset getopt lib. */
+ optind = 1; /* Reset getopt lib. */
return 0;
}
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index 15656155..d8a2107d 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -89,7 +89,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1,
.hw_vlan_filter = 0,
.jumbo_frame = 0,
- .hw_strip_crc = 0,
+ .hw_strip_crc = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -300,7 +300,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return 0;
}
diff --git a/examples/qos_sched/Makefile b/examples/qos_sched/Makefile
index f59645f5..e41ac500 100644
--- a/examples/qos_sched/Makefile
+++ b/examples/qos_sched/Makefile
@@ -42,6 +42,7 @@ ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
+clean:
else
# binary name
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 70fdcdb2..15f117f5 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}
if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx, NULL) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);
@@ -179,8 +179,8 @@ app_tx_thread(struct thread_conf **confs)
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
- burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ burst_conf.qos_dequeue, NULL);
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
conf->counter = 0; /* reset empty read loop counter */
@@ -218,7 +218,7 @@ app_worker_thread(struct thread_conf **confs)
/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt, NULL) == 0)
+ ; /* empty body */
conf_idx++;
if (confs[conf_idx] == NULL)
@@ -252,7 +254,7 @@ app_mixed_thread(struct thread_conf **confs)
/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 70e12bb4..fe0221c6 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -92,7 +92,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
diff --git a/examples/quota_watermark/qw/args.c b/examples/quota_watermark/qw/args.c
index 408b54d1..6ba77bc0 100644
--- a/examples/quota_watermark/qw/args.c
+++ b/examples/quota_watermark/qw/args.c
@@ -47,9 +47,9 @@ unsigned int portmask = 0;
static void
usage(const char *prgname)
{
- fprintf(stderr, "Usage: %s [EAL args] -- -p <portmask>\n"
- "-p PORTMASK: hexadecimal bitmask of NIC ports to configure\n",
- prgname);
+ fprintf(stderr, "Usage: %s [EAL args] -- -p <portmask>\n"
+ "-p PORTMASK: hexadecimal bitmask of NIC ports to configure\n",
+ prgname);
}
static unsigned long
@@ -61,44 +61,47 @@ parse_portmask(const char *portmask_str)
static void
check_core_count(void)
{
- if (rte_lcore_count() < 3)
- rte_exit(EXIT_FAILURE, "At least 3 cores need to be passed in the coremask\n");
+ if (rte_lcore_count() < 3)
+ rte_exit(EXIT_FAILURE,
+ "At least 3 cores need to be passed in the coremask\n");
}
static void
check_portmask_value(unsigned int portmask)
{
- unsigned int port_nb = 0;
+ unsigned int port_nb = 0;
- port_nb = __builtin_popcount(portmask);
+ port_nb = __builtin_popcount(portmask);
- if (port_nb == 0)
- rte_exit(EXIT_FAILURE, "At least 2 ports need to be passed in the portmask\n");
+ if (port_nb == 0)
+ rte_exit(EXIT_FAILURE,
+ "At least 2 ports need to be passed in the portmask\n");
- if (port_nb % 2 != 0)
- rte_exit(EXIT_FAILURE, "An even number of ports is required in the portmask\n");
+ if (port_nb % 2 != 0)
+ rte_exit(EXIT_FAILURE,
+ "An even number of ports is required in the portmask\n");
}
int
parse_qw_args(int argc, char **argv)
{
- int opt;
-
- while ((opt = getopt(argc, argv, "h:p:")) != -1) {
- switch (opt) {
- case 'h':
- usage(argv[0]);
- break;
- case 'p':
- portmask = parse_portmask(optarg);
- break;
- default:
- usage(argv[0]);
- }
- }
-
- check_core_count();
- check_portmask_value(portmask);
-
- return 0;
+ int opt;
+
+ while ((opt = getopt(argc, argv, "h:p:")) != -1) {
+ switch (opt) {
+ case 'h':
+ usage(argv[0]);
+ break;
+ case 'p':
+ portmask = parse_portmask(optarg);
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+
+ check_core_count();
+ check_portmask_value(portmask);
+
+ return 0;
}
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index c2087218..b6264fcf 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -51,124 +51,128 @@
static const struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
- },
- .txmode = {
- .mq_mode = ETH_DCB_NONE,
- },
+ .rxmode = {
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled */
+ .hw_ip_checksum = 0, /**< IP csum offload disabled */
+ .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+ .jumbo_frame = 0, /**< Jumbo Frame disabled */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ },
+ .txmode = {
+ .mq_mode = ETH_DCB_NONE,
+ },
};
static struct rte_eth_fc_conf fc_conf = {
- .mode = RTE_FC_TX_PAUSE,
- .high_water = 80 * 510 / 100,
- .low_water = 60 * 510 / 100,
- .pause_time = 1337,
- .send_xon = 0,
+ .mode = RTE_FC_TX_PAUSE,
+ .high_water = 80 * 510 / 100,
+ .low_water = 60 * 510 / 100,
+ .pause_time = 1337,
+ .send_xon = 0,
};
void configure_eth_port(uint8_t port_id)
{
- int ret;
-
- rte_eth_dev_stop(port_id);
-
- ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n",
- (unsigned) port_id, ret);
-
- /* Initialize the port's RX queue */
- ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
- rte_eth_dev_socket_id(port_id),
- NULL,
- mbuf_pool);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to setup RX queue on "
- "port %u (error %d)\n", (unsigned) port_id, ret);
-
- /* Initialize the port's TX queue */
- ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
- rte_eth_dev_socket_id(port_id),
- NULL);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to setup TX queue on "
- "port %u (error %d)\n", (unsigned) port_id, ret);
-
- /* Initialize the port's flow control */
- ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to setup hardware flow control on "
- "port %u (error %d)\n", (unsigned) port_id, ret);
-
- /* Start the port */
- ret = rte_eth_dev_start(port_id);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to start port %u (error %d)\n",
- (unsigned) port_id, ret);
-
- /* Put it in promiscuous mode */
- rte_eth_promiscuous_enable(port_id);
+ int ret;
+
+ rte_eth_dev_stop(port_id);
+
+ ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Initialize the port's RX queue */
+ ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
+ rte_eth_dev_socket_id(port_id),
+ NULL,
+ mbuf_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to setup RX queue on port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Initialize the port's TX queue */
+ ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
+ rte_eth_dev_socket_id(port_id),
+ NULL);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to setup TX queue on port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Initialize the port's flow control */
+ ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to setup hardware flow control on port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Start the port */
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Failed to start port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Put it in promiscuous mode */
+ rte_eth_promiscuous_enable(port_id);
}
void
init_dpdk(void)
{
- if (rte_eth_dev_count() < 2)
- rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
+ if (rte_eth_dev_count() < 2)
+ rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
}
void init_ring(int lcore_id, uint8_t port_id)
{
- struct rte_ring *ring;
- char ring_name[RTE_RING_NAMESIZE];
+ struct rte_ring *ring;
+ char ring_name[RTE_RING_NAMESIZE];
- snprintf(ring_name, RTE_RING_NAMESIZE,
- "core%d_port%d", lcore_id, port_id);
- ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
- RING_F_SP_ENQ | RING_F_SC_DEQ);
+ snprintf(ring_name, RTE_RING_NAMESIZE,
+ "core%d_port%d", lcore_id, port_id);
+ ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
- if (ring == NULL)
- rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ if (ring == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
- rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100);
+ *high_watermark = 80 * RING_SIZE / 100;
- rings[lcore_id][port_id] = ring;
+ rings[lcore_id][port_id] = ring;
}
void
pair_ports(void)
{
- uint8_t i, j;
-
- /* Pair ports with their "closest neighbour" in the portmask */
- for (i = 0; i < RTE_MAX_ETHPORTS; i++)
- if (is_bit_set(i, portmask))
- for (j = (uint8_t) (i + 1); j < RTE_MAX_ETHPORTS; j++)
- if (is_bit_set(j, portmask)) {
- port_pairs[i] = j;
- port_pairs[j] = i;
- i = j;
- break;
- }
+ uint8_t i, j;
+
+ /* Pair ports with their "closest neighbour" in the portmask */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++)
+ if (is_bit_set(i, portmask))
+ for (j = (uint8_t) (i + 1); j < RTE_MAX_ETHPORTS; j++)
+ if (is_bit_set(j, portmask)) {
+ port_pairs[i] = j;
+ port_pairs[j] = i;
+ i = j;
+ break;
+ }
}
void
setup_shared_variables(void)
{
- const struct rte_memzone *qw_memzone;
+ const struct rte_memzone *qw_memzone;
- qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME, 2 * sizeof(int),
- rte_socket_id(), RTE_MEMZONE_2MB);
- if (qw_memzone == NULL)
- rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
+ 3 * sizeof(int), rte_socket_id(), 0);
+ if (qw_memzone == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
- quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ quota = qw_memzone->addr;
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
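
The ring watermark feature (and rte_ring_set_water_mark()) was removed in 17.05, so the example now keeps its own high watermark in the shared memzone, grown from two ints to three, and derives ring occupancy from the free-space count that the new enqueue API reports. The replacement check, as receive_stage() performs it, in sketch form:

    unsigned int free_count;
    int i;

    if (rte_ring_enqueue_bulk(ring, (void **)pkts, nb_rx_pkts,
            &free_count) == 0) {
        for (i = 0; i < nb_rx_pkts; i++)    /* nothing enqueued: drop */
            rte_pktmbuf_free(pkts[i]);
    }
    if (RING_SIZE - free_count > *high_watermark)   /* was ret == -EDQUOT */
        ring_state[port_id] = RING_OVERLOADED;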
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 8ed02148..d4fcfde4 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -60,13 +60,14 @@
#define ETHER_TYPE_FLOW_CONTROL 0x8808
struct ether_fc_frame {
- uint16_t opcode;
- uint16_t param;
+ uint16_t opcode;
+ uint16_t param;
} __attribute__((__packed__));
int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;
uint8_t port_pairs[RTE_MAX_ETHPORTS];
@@ -76,38 +77,39 @@ struct rte_mempool *mbuf_pool;
static void send_pause_frame(uint8_t port_id, uint16_t duration)
{
- struct rte_mbuf *mbuf;
- struct ether_fc_frame *pause_frame;
- struct ether_hdr *hdr;
- struct ether_addr mac_addr;
+ struct rte_mbuf *mbuf;
+ struct ether_fc_frame *pause_frame;
+ struct ether_hdr *hdr;
+ struct ether_addr mac_addr;
- RTE_LOG(DEBUG, USER1, "Sending PAUSE frame (duration=%d) on port %d\n",
- duration, port_id);
+ RTE_LOG_DP(DEBUG, USER1,
+ "Sending PAUSE frame (duration=%d) on port %d\n",
+ duration, port_id);
- /* Get a mbuf from the pool */
- mbuf = rte_pktmbuf_alloc(mbuf_pool);
- if (unlikely(mbuf == NULL))
- return;
+ /* Get a mbuf from the pool */
+ mbuf = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(mbuf == NULL))
+ return;
- /* Prepare a PAUSE frame */
- hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- pause_frame = (struct ether_fc_frame *) &hdr[1];
+ /* Prepare a PAUSE frame */
+ hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ pause_frame = (struct ether_fc_frame *) &hdr[1];
- rte_eth_macaddr_get(port_id, &mac_addr);
- ether_addr_copy(&mac_addr, &hdr->s_addr);
+ rte_eth_macaddr_get(port_id, &mac_addr);
+ ether_addr_copy(&mac_addr, &hdr->s_addr);
- void *tmp = &hdr->d_addr.addr_bytes[0];
- *((uint64_t *)tmp) = 0x010000C28001ULL;
+ void *tmp = &hdr->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x010000C28001ULL;
- hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
+ hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
- pause_frame->opcode = rte_cpu_to_be_16(0x0001);
- pause_frame->param = rte_cpu_to_be_16(duration);
+ pause_frame->opcode = rte_cpu_to_be_16(0x0001);
+ pause_frame->param = rte_cpu_to_be_16(duration);
- mbuf->pkt_len = 60;
- mbuf->data_len = 60;
+ mbuf->pkt_len = 60;
+ mbuf->data_len = 60;
- rte_eth_tx_burst(port_id, 0, &mbuf, 1);
+ rte_eth_tx_burst(port_id, 0, &mbuf, 1);
}
/**
@@ -121,13 +123,13 @@ static void send_pause_frame(uint8_t port_id, uint16_t duration)
static unsigned int
get_previous_lcore_id(unsigned int lcore_id)
{
- int i;
+ int i;
- for (i = lcore_id - 1; i >= 0; i--)
- if (rte_lcore_is_enabled(i))
- return i;
+ for (i = lcore_id - 1; i >= 0; i--)
+ if (rte_lcore_is_enabled(i))
+ return i;
- return -1;
+ return -1;
}
/**
@@ -139,125 +141,137 @@ get_previous_lcore_id(unsigned int lcore_id)
static unsigned int
get_last_lcore_id(void)
{
- int i;
+ int i;
- for (i = RTE_MAX_LCORE; i >= 0; i--)
- if (rte_lcore_is_enabled(i))
- return i;
+ for (i = RTE_MAX_LCORE; i >= 0; i--)
+ if (rte_lcore_is_enabled(i))
+ return i;
- return 0;
+ return 0;
}
static void
receive_stage(__attribute__((unused)) void *args)
{
- int i, ret;
+ int i, ret;
- uint8_t port_id;
- uint16_t nb_rx_pkts;
+ uint8_t port_id;
+ uint16_t nb_rx_pkts;
- unsigned int lcore_id;
+ unsigned int lcore_id;
+ unsigned int free;
- struct rte_mbuf *pkts[MAX_PKT_QUOTA];
- struct rte_ring *ring;
- enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
+ struct rte_mbuf *pkts[MAX_PKT_QUOTA];
+ struct rte_ring *ring;
+ enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
- lcore_id = rte_lcore_id();
+ lcore_id = rte_lcore_id();
- RTE_LOG(INFO, USER1,
- "%s() started on core %u\n", __func__, lcore_id);
+ RTE_LOG(INFO, USER1,
+ "%s() started on core %u\n", __func__, lcore_id);
- while (1) {
+ while (1) {
- /* Process each port round robin style */
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ /* Process each port round robin style */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
+ if (!is_bit_set(port_id, portmask))
+ continue;
- ring = rings[lcore_id][port_id];
+ ring = rings[lcore_id][port_id];
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(ring) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(ring) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
- /* Enqueue received packets on the RX ring */
- nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts, (uint16_t) *quota);
- ret = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts);
- if (ret == -EDQUOT) {
- ring_state[port_id] = RING_OVERLOADED;
- send_pause_frame(port_id, 1337);
- }
+ /* Enqueue received packets on the RX ring */
+ nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
+ (uint16_t) *quota);
+ ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
+ ring_state[port_id] = RING_OVERLOADED;
+ send_pause_frame(port_id, 1337);
+ }
- else if (ret == -ENOBUFS) {
+ if (ret == 0) {
- /* Return mbufs to the pool, effectively dropping packets */
- for (i = 0; i < nb_rx_pkts; i++)
- rte_pktmbuf_free(pkts[i]);
- }
- }
- }
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_rx_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+ }
}
static void
pipeline_stage(__attribute__((unused)) void *args)
{
- int i, ret;
- int nb_dq_pkts;
+ int i, ret;
+ int nb_dq_pkts;
- uint8_t port_id;
+ uint8_t port_id;
- unsigned int lcore_id, previous_lcore_id;
+ unsigned int lcore_id, previous_lcore_id;
+ unsigned int free;
- void *pkts[MAX_PKT_QUOTA];
- struct rte_ring *rx, *tx;
- enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
+ void *pkts[MAX_PKT_QUOTA];
+ struct rte_ring *rx, *tx;
+ enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
- lcore_id = rte_lcore_id();
- previous_lcore_id = get_previous_lcore_id(lcore_id);
+ lcore_id = rte_lcore_id();
+ previous_lcore_id = get_previous_lcore_id(lcore_id);
- RTE_LOG(INFO, USER1,
- "%s() started on core %u - processing packets from core %u\n",
- __func__, lcore_id, previous_lcore_id);
+ RTE_LOG(INFO, USER1,
+ "%s() started on core %u - processing packets from core %u\n",
+ __func__, lcore_id, previous_lcore_id);
- while (1) {
+ while (1) {
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
+ if (!is_bit_set(port_id, portmask))
+ continue;
- tx = rings[lcore_id][port_id];
- rx = rings[previous_lcore_id][port_id];
+ tx = rings[lcore_id][port_id];
+ rx = rings[previous_lcore_id][port_id];
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(tx) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(tx) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
- /* Dequeue up to quota mbuf from rx */
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
- if (unlikely(nb_dq_pkts < 0))
- continue;
+ /* Dequeue up to quota mbuf from rx */
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
+ if (unlikely(nb_dq_pkts < 0))
+ continue;
- /* Enqueue them on tx */
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
- ring_state[port_id] = RING_OVERLOADED;
+ /* Enqueue them on tx */
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
+ ring_state[port_id] = RING_OVERLOADED;
- else if (ret == -ENOBUFS) {
+ if (ret == 0) {
- /* Return mbufs to the pool, effectively dropping packets */
- for (i = 0; i < nb_dq_pkts; i++)
- rte_pktmbuf_free(pkts[i]);
- }
- }
- }
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_dq_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+ }
}
static void
@@ -265,108 +279,114 @@ send_stage(__attribute__((unused)) void *args)
{
uint16_t nb_dq_pkts;
- uint8_t port_id;
- uint8_t dest_port_id;
+ uint8_t port_id;
+ uint8_t dest_port_id;
- unsigned int lcore_id, previous_lcore_id;
+ unsigned int lcore_id, previous_lcore_id;
- struct rte_ring *tx;
- struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
+ struct rte_ring *tx;
+ struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
- lcore_id = rte_lcore_id();
- previous_lcore_id = get_previous_lcore_id(lcore_id);
+ lcore_id = rte_lcore_id();
+ previous_lcore_id = get_previous_lcore_id(lcore_id);
- RTE_LOG(INFO, USER1,
- "%s() started on core %u - processing packets from core %u\n",
- __func__, lcore_id, previous_lcore_id);
+ RTE_LOG(INFO, USER1,
+ "%s() started on core %u - processing packets from core %u\n",
+ __func__, lcore_id, previous_lcore_id);
- while (1) {
+ while (1) {
- /* Process each ring round robin style */
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ /* Process each ring round robin style */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
+ if (!is_bit_set(port_id, portmask))
+ continue;
- dest_port_id = port_pairs[port_id];
- tx = rings[previous_lcore_id][port_id];
+ dest_port_id = port_pairs[port_id];
+ tx = rings[previous_lcore_id][port_id];
- if (rte_ring_empty(tx))
- continue;
+ if (rte_ring_empty(tx))
+ continue;
- /* Dequeue packets from tx and send them */
- nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx, (void *) tx_pkts, *quota);
- rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
+ /* Dequeue packets from tx and send them */
+ nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
+ (void *) tx_pkts, *quota, NULL);
+ rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
- /* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
- }
- }
+ /* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
+ }
+ }
}
int
main(int argc, char **argv)
{
- int ret;
- unsigned int lcore_id, master_lcore_id, last_lcore_id;
+ int ret;
+ unsigned int lcore_id, master_lcore_id, last_lcore_id;
- uint8_t port_id;
+ uint8_t port_id;
- rte_set_log_level(RTE_LOG_INFO);
+ rte_log_set_global_level(RTE_LOG_INFO);
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
- argc -= ret;
- argv += ret;
+ argc -= ret;
+ argv += ret;
- init_dpdk();
- setup_shared_variables();
+ init_dpdk();
+ setup_shared_variables();
- *quota = 32;
- *low_watermark = 60 * RING_SIZE / 100;
+ *quota = 32;
+ *low_watermark = 60 * RING_SIZE / 100;
- last_lcore_id = get_last_lcore_id();
- master_lcore_id = rte_get_master_lcore();
+ last_lcore_id = get_last_lcore_id();
+ master_lcore_id = rte_get_master_lcore();
- /* Parse the application's arguments */
- ret = parse_qw_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");
+ /* Parse the application's arguments */
+ ret = parse_qw_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");
- /* Create a pool of mbuf to store packets */
- mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
- MBUF_DATA_SIZE, rte_socket_id());
- if (mbuf_pool == NULL)
- rte_panic("%s\n", rte_strerror(rte_errno));
+ /* Create a pool of mbuf to store packets */
+ mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
+ MBUF_DATA_SIZE, rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_panic("%s\n", rte_strerror(rte_errno));
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
- if (is_bit_set(port_id, portmask)) {
- configure_eth_port(port_id);
- init_ring(master_lcore_id, port_id);
- }
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ if (is_bit_set(port_id, portmask)) {
+ configure_eth_port(port_id);
+ init_ring(master_lcore_id, port_id);
+ }
- pair_ports();
+ pair_ports();
- /* Start pipeline_connect() on all the available slave lcore but the last */
- for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
- if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id) {
+ /*
+ * Start pipeline_connect() on all the available slave lcores
+ * but the last
+ */
+ for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) &&
+ lcore_id != master_lcore_id) {
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
- if (is_bit_set(port_id, portmask))
- init_ring(lcore_id, port_id);
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ if (is_bit_set(port_id, portmask))
+ init_ring(lcore_id, port_id);
- /* typecast is a workaround for GCC 4.3 bug */
- rte_eal_remote_launch((int (*)(void *))pipeline_stage, NULL, lcore_id);
- }
- }
+ /* typecast is a workaround for GCC 4.3 bug */
+ rte_eal_remote_launch((int (*)(void *))pipeline_stage,
+ NULL, lcore_id);
+ }
+ }
- /* Start send_stage() on the last slave core */
- /* typecast is a workaround for GCC 4.3 bug */
- rte_eal_remote_launch((int (*)(void *))send_stage, NULL, last_lcore_id);
+ /* Start send_stage() on the last slave core */
+ /* typecast is a workaround for GCC 4.3 bug */
+ rte_eal_remote_launch((int (*)(void *))send_stage, NULL, last_lcore_id);
- /* Start receive_stage() on the master core */
- receive_stage(NULL);
+ /* Start receive_stage() on the master core */
+ receive_stage(NULL);
- return 0;
+ return 0;
}
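
Note on the `(int (*)(void *))` casts kept above: they silence a prototype mismatch (the stage functions return `void`, while `rte_eal_remote_launch()` expects an `int (*)(void *)`), but calling a function through an incompatible pointer type is formally undefined behaviour in C. A hedged alternative, assuming the stage functions keep their current `void f(void *)` signatures, is a thin shim with the exact `lcore_function_t` type:

    /* Sketch only, not part of the patch: a type-correct launch shim
     * for the app's own pipeline_stage(void *). */
    static int
    pipeline_stage_wrapper(void *arg)
    {
            pipeline_stage(arg);    /* run the existing stage */
            return 0;               /* lcore_function_t returns int */
    }

    /* then, in main(): */
    rte_eal_remote_launch(pipeline_stage_wrapper, NULL, lcore_id);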
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 6b364898..8c8e3116 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -37,12 +37,13 @@
#include "../include/conf.h"
enum ring_state {
- RING_READY,
- RING_OVERLOADED,
+ RING_READY,
+ RING_OVERLOADED,
};
extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;
extern uint8_t port_pairs[RTE_MAX_ETHPORTS];
@@ -53,7 +54,7 @@ extern struct rte_mempool *mbuf_pool;
static inline int
is_bit_set(int i, unsigned int mask)
{
- return (1 << i) & mask;
+ return (1 << i) & mask;
}
#endif /* _MAIN_H_ */
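
The new `high_watermark` extern exists because the DPDK 17.05 ring rework removed the library's built-in watermark support (`rte_ring_set_water_mark()` is gone, as the commands.c hunk below shows), so the threshold now lives in application-shared memory next to `quota` and `low_watermark`. A minimal sketch of how an enqueue path can consult it, where `state` is a hypothetical stand-in for whatever per-ring bookkeeping the app keeps:

    /* Sketch: app-level replacement for the removed ring watermark. */
    if (rte_ring_count(ring) >= *high_watermark)
            state = RING_OVERLOADED;
    else if (rte_ring_count(ring) < *low_watermark)
            state = RING_READY;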
diff --git a/examples/quota_watermark/qwctl/commands.c b/examples/quota_watermark/qwctl/commands.c
index 5348dd3d..5cac0e17 100644
--- a/examples/quota_watermark/qwctl/commands.c
+++ b/examples/quota_watermark/qwctl/commands.c
@@ -53,36 +53,36 @@
*/
struct cmd_help_tokens {
- cmdline_fixed_string_t verb;
+ cmdline_fixed_string_t verb;
};
cmdline_parse_token_string_t cmd_help_verb =
- TOKEN_STRING_INITIALIZER(struct cmd_help_tokens, verb, "help");
+ TOKEN_STRING_INITIALIZER(struct cmd_help_tokens, verb, "help");
static void
cmd_help_handler(__attribute__((unused)) void *parsed_result,
- struct cmdline *cl,
- __attribute__((unused)) void *data)
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
{
- cmdline_printf(cl, "Available commands:\n"
- "- help\n"
- "- set [ring_name|variable] <value>\n"
- "- show [ring_name|variable]\n"
- "\n"
- "Available variables:\n"
- "- low_watermark\n"
- "- quota\n"
- "- ring names follow the core%%u_port%%u format\n");
+ cmdline_printf(cl, "Available commands:\n"
+ "- help\n"
+ "- set [ring_name|variable] <value>\n"
+ "- show [ring_name|variable]\n"
+ "\n"
+ "Available variables:\n"
+ "- low_watermark\n"
+ "- quota\n"
+ "- ring names follow the core%%u_port%%u format\n");
}
cmdline_parse_inst_t cmd_help = {
- .f = cmd_help_handler,
- .data = NULL,
- .help_str = "show help",
- .tokens = {
- (void *) &cmd_help_verb,
- NULL,
- },
+ .f = cmd_help_handler,
+ .data = NULL,
+ .help_str = "show help",
+ .tokens = {
+ (void *) &cmd_help_verb,
+ NULL,
+ },
};
@@ -91,69 +91,74 @@ cmdline_parse_inst_t cmd_help = {
*/
struct cmd_set_tokens {
- cmdline_fixed_string_t verb;
- cmdline_fixed_string_t variable;
- uint32_t value;
+ cmdline_fixed_string_t verb;
+ cmdline_fixed_string_t variable;
+ uint32_t value;
};
cmdline_parse_token_string_t cmd_set_verb =
- TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, verb, "set");
+ TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, verb, "set");
cmdline_parse_token_string_t cmd_set_variable =
- TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, variable, NULL);
+ TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, variable, NULL);
cmdline_parse_token_num_t cmd_set_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_tokens, value, UINT32);
+ TOKEN_NUM_INITIALIZER(struct cmd_set_tokens, value, UINT32);
static void
cmd_set_handler(__attribute__((unused)) void *parsed_result,
- struct cmdline *cl,
- __attribute__((unused)) void *data)
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
{
- struct cmd_set_tokens *tokens = parsed_result;
- struct rte_ring *ring;
-
- if (!strcmp(tokens->variable, "quota")) {
-
- if (tokens->value > 0 && tokens->value <= MAX_PKT_QUOTA)
- *quota = tokens->value;
- else
- cmdline_printf(cl, "quota must be between 1 and %u\n", MAX_PKT_QUOTA);
- }
-
- else if (!strcmp(tokens->variable, "low_watermark")) {
-
- if (tokens->value <= 100)
- *low_watermark = tokens->value * RING_SIZE / 100;
- else
- cmdline_printf(cl, "low_watermark must be between 0%% and 100%%\n");
- }
-
- else {
-
- ring = rte_ring_lookup(tokens->variable);
- if (ring == NULL)
- cmdline_printf(cl, "Cannot find ring \"%s\"\n", tokens->variable);
- else
- if (tokens->value >= *low_watermark * 100 / RING_SIZE
- && tokens->value <= 100)
- rte_ring_set_water_mark(ring, tokens->value * RING_SIZE / 100);
- else
- cmdline_printf(cl, "ring high watermark must be between %u%% "
- "and 100%%\n", *low_watermark * 100 / RING_SIZE);
- }
+ struct cmd_set_tokens *tokens = parsed_result;
+ struct rte_ring *ring;
+
+ if (!strcmp(tokens->variable, "quota")) {
+
+ if (tokens->value > 0 && tokens->value <= MAX_PKT_QUOTA)
+ *quota = tokens->value;
+ else
+ cmdline_printf(cl, "quota must be between 1 and %u\n",
+ MAX_PKT_QUOTA);
+ }
+
+ else if (!strcmp(tokens->variable, "low_watermark")) {
+
+ if (tokens->value <= 100)
+ *low_watermark = tokens->value * RING_SIZE / 100;
+ else
+ cmdline_printf(cl,
+ "low_watermark must be between 0%% and 100%%\n");
+ }
+
+ else {
+
+ ring = rte_ring_lookup(tokens->variable);
+ if (ring == NULL)
+ cmdline_printf(cl, "Cannot find ring \"%s\"\n",
+ tokens->variable);
+ else
+ if (tokens->value >= *low_watermark * 100 / RING_SIZE
+ && tokens->value <= 100)
+ *high_watermark = tokens->value *
+ RING_SIZE / 100;
+ else
+ cmdline_printf(cl,
+ "ring high watermark must be between %u%% and 100%%\n",
+ *low_watermark * 100 / RING_SIZE);
+ }
}
cmdline_parse_inst_t cmd_set = {
- .f = cmd_set_handler,
- .data = NULL,
- .help_str = "Set a variable value",
- .tokens = {
- (void *) &cmd_set_verb,
- (void *) &cmd_set_variable,
- (void *) &cmd_set_value,
- NULL,
- },
+ .f = cmd_set_handler,
+ .data = NULL,
+ .help_str = "Set a variable value",
+ .tokens = {
+ (void *) &cmd_set_verb,
+ (void *) &cmd_set_variable,
+ (void *) &cmd_set_value,
+ NULL,
+ },
};
@@ -162,56 +167,59 @@ cmdline_parse_inst_t cmd_set = {
*/
struct cmd_show_tokens {
- cmdline_fixed_string_t verb;
- cmdline_fixed_string_t variable;
+ cmdline_fixed_string_t verb;
+ cmdline_fixed_string_t variable;
};
cmdline_parse_token_string_t cmd_show_verb =
- TOKEN_STRING_INITIALIZER(struct cmd_show_tokens, verb, "show");
+ TOKEN_STRING_INITIALIZER(struct cmd_show_tokens, verb, "show");
cmdline_parse_token_string_t cmd_show_variable =
- TOKEN_STRING_INITIALIZER(struct cmd_show_tokens, variable, NULL);
+ TOKEN_STRING_INITIALIZER(struct cmd_show_tokens,
+ variable, NULL);
static void
cmd_show_handler(__attribute__((unused)) void *parsed_result,
- struct cmdline *cl,
- __attribute__((unused)) void *data)
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
{
- struct cmd_show_tokens *tokens = parsed_result;
- struct rte_ring *ring;
+ struct cmd_show_tokens *tokens = parsed_result;
+ struct rte_ring *ring;
- if (!strcmp(tokens->variable, "quota"))
- cmdline_printf(cl, "Global quota: %d\n", *quota);
+ if (!strcmp(tokens->variable, "quota"))
+ cmdline_printf(cl, "Global quota: %d\n", *quota);
- else if (!strcmp(tokens->variable, "low_watermark"))
- cmdline_printf(cl, "Global low_watermark: %u\n", *low_watermark);
+ else if (!strcmp(tokens->variable, "low_watermark"))
+ cmdline_printf(cl, "Global low_watermark: %u\n",
+ *low_watermark);
- else {
+ else {
- ring = rte_ring_lookup(tokens->variable);
- if (ring == NULL)
- cmdline_printf(cl, "Cannot find ring \"%s\"\n", tokens->variable);
- else
- rte_ring_dump(stdout, ring);
- }
+ ring = rte_ring_lookup(tokens->variable);
+ if (ring == NULL)
+ cmdline_printf(cl, "Cannot find ring \"%s\"\n",
+ tokens->variable);
+ else
+ rte_ring_dump(stdout, ring);
+ }
}
cmdline_parse_inst_t cmd_show = {
- .f = cmd_show_handler,
- .data = NULL,
- .help_str = "Show a variable value",
- .tokens = {
- (void *) &cmd_show_verb,
- (void *) &cmd_show_variable,
- NULL,
- },
+ .f = cmd_show_handler,
+ .data = NULL,
+ .help_str = "Show a variable value",
+ .tokens = {
+ (void *) &cmd_show_verb,
+ (void *) &cmd_show_variable,
+ NULL,
+ },
};
cmdline_parse_ctx_t qwctl_ctx[] = {
- (cmdline_parse_inst_t *)&cmd_help,
- (cmdline_parse_inst_t *)&cmd_set,
- (cmdline_parse_inst_t *)&cmd_show,
- NULL,
+ (cmdline_parse_inst_t *)&cmd_help,
+ (cmdline_parse_inst_t *)&cmd_set,
+ (cmdline_parse_inst_t *)&cmd_show,
+ NULL,
};
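
All three qwctl commands follow the same cmdline-library recipe: a token struct, one `TOKEN_*_INITIALIZER` per token, a handler, and a `cmdline_parse_inst_t` registered in `qwctl_ctx[]`. As a hedged illustration, a hypothetical argument-less `reset` verb (not part of the patch) would be added like this:

    /* Sketch: hypothetical "reset" command restoring the qw defaults. */
    struct cmd_reset_tokens {
            cmdline_fixed_string_t verb;
    };

    cmdline_parse_token_string_t cmd_reset_verb =
            TOKEN_STRING_INITIALIZER(struct cmd_reset_tokens, verb, "reset");

    static void
    cmd_reset_handler(__attribute__((unused)) void *parsed_result,
                    struct cmdline *cl,
                    __attribute__((unused)) void *data)
    {
            *quota = 32;                           /* qw defaults */
            *low_watermark = 60 * RING_SIZE / 100;
            cmdline_printf(cl, "defaults restored\n");
    }

    cmdline_parse_inst_t cmd_reset = {
            .f = cmd_reset_handler,
            .data = NULL,
            .help_str = "Reset quota and low_watermark to defaults",
            .tokens = {
                    (void *) &cmd_reset_verb,
                    NULL,
            },
    };

    /* ...and list &cmd_reset in qwctl_ctx[] before the NULL terminator. */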
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index 29c501ca..18ec17a1 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -55,40 +55,42 @@
int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;
static void
setup_shared_variables(void)
{
- const struct rte_memzone *qw_memzone;
+ const struct rte_memzone *qw_memzone;
- qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
- if (qw_memzone == NULL)
- rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");
+ qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
+ if (qw_memzone == NULL)
+ rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");
- quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ quota = qw_memzone->addr;
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
int main(int argc, char **argv)
{
- int ret;
- struct cmdline *cl;
+ int ret;
+ struct cmdline *cl;
- rte_set_log_level(RTE_LOG_INFO);
+ rte_log_set_global_level(RTE_LOG_INFO);
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
- setup_shared_variables();
+ setup_shared_variables();
- cl = cmdline_stdin_new(qwctl_ctx, "qwctl> ");
- if (cl == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n");
+ cl = cmdline_stdin_new(qwctl_ctx, "qwctl> ");
+ if (cl == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n");
- cmdline_interact(cl);
- cmdline_stdin_exit(cl);
+ cmdline_interact(cl);
+ cmdline_stdin_exit(cl);
- return 0;
+ return 0;
}
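
Both binaries rely on a fixed three-word layout inside the `QUOTA_WATERMARK_MEMZONE_NAME` zone: `quota` in word 0, `low_watermark` in word 1, and the new `high_watermark` in word 2. qwctl only looks the zone up; the qw process is the side that reserves it. A hedged sketch of that reserving counterpart (flags and error handling assumed, not taken from the patch):

    /* Sketch: qw-side reservation matching the qwctl lookup above. */
    const struct rte_memzone *qw_memzone;

    qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
                    3 * sizeof(int), rte_socket_id(), 0);
    if (qw_memzone == NULL)
            rte_exit(EXIT_FAILURE, "Couldn't reserve memzone\n");

    quota = qw_memzone->addr;
    low_watermark = (unsigned int *) qw_memzone->addr + 1;
    high_watermark = (unsigned int *) qw_memzone->addr + 2;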
diff --git a/examples/quota_watermark/qwctl/qwctl.h b/examples/quota_watermark/qwctl/qwctl.h
index 8d146e57..545914b3 100644
--- a/examples/quota_watermark/qwctl/qwctl.h
+++ b/examples/quota_watermark/qwctl/qwctl.h
@@ -36,5 +36,6 @@
extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;
#endif /* _MAIN_H_ */
diff --git a/examples/server_node_efd/Makefile b/examples/server_node_efd/Makefile
new file mode 100644
index 00000000..6977ef93
--- /dev/null
+++ b/examples/server_node_efd/Makefile
@@ -0,0 +1,44 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += server
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += node
+
+include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/server_node_efd/node/Makefile b/examples/server_node_efd/node/Makefile
new file mode 100644
index 00000000..8cf7b650
--- /dev/null
+++ b/examples/server_node_efd/node/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = node
+
+# all source are stored in SRCS-y
+SRCS-y := node.c
+
+CFLAGS += $(WERROR_FLAGS) -O3
+CFLAGS += -I$(SRCDIR)/../shared
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
new file mode 100644
index 00000000..f780b926
--- /dev/null
+++ b/examples/server_node_efd/node/node.c
@@ -0,0 +1,417 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_log.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+#include <rte_debug.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_ip.h>
+
+#include "common.h"
+
+/* Number of packets to attempt to read from queue */
+#define PKT_READ_SIZE ((uint16_t)32)
+
+/*
+ * Our node id number - tells us which rx queue to read, and NIC TX
+ * queue to write to.
+ */
+static uint8_t node_id;
+
+#define MBQ_CAPACITY 32
+
+/* maps input ports to output ports for packets */
+static uint8_t output_ports[RTE_MAX_ETHPORTS];
+
+/* buffers up a set of packets that are ready to send */
+struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
+/* shared data from server. We update statistics here */
+static struct tx_stats *tx_stats;
+
+static struct filter_stats *filter_stats;
+
+/*
+ * print a usage message
+ */
+static void
+usage(const char *progname)
+{
+ printf("Usage: %s [EAL args] -- -n <node_id>\n\n", progname);
+}
+
+/*
+ * Convert the node id number from a string to an int.
+ */
+static int
+parse_node_num(const char *node)
+{
+ char *end = NULL;
+ unsigned long temp;
+
+ if (node == NULL || *node == '\0')
+ return -1;
+
+ temp = strtoul(node, &end, 10);
+ if (end == NULL || *end != '\0')
+ return -1;
+
+ node_id = (uint8_t)temp;
+ return 0;
+}
+
+/*
+ * Parse the application arguments to the node app.
+ */
+static int
+parse_app_args(int argc, char *argv[])
+{
+ int option_index, opt;
+ char **argvopt = argv;
+ const char *progname = NULL;
+ static struct option lgopts[] = { /* no long options */
+ {NULL, 0, 0, 0 }
+ };
+ progname = argv[0];
+
+ while ((opt = getopt_long(argc, argvopt, "n:", lgopts,
+ &option_index)) != EOF) {
+ switch (opt) {
+ case 'n':
+ if (parse_node_num(optarg) != 0) {
+ usage(progname);
+ return -1;
+ }
+ break;
+ default:
+ usage(progname);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Tx buffer error callback
+ */
+static void
+flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
+ void *userdata)
+{
+ int i;
+ uint8_t port_id = (uintptr_t)userdata;
+
+ tx_stats->tx_drop[port_id] += count;
+
+ /* free the mbufs which failed from transmit */
+ for (i = 0; i < count; i++)
+ rte_pktmbuf_free(unsent[i]);
+
+}
+
+static void
+configure_tx_buffer(uint8_t port_id, uint16_t size)
+{
+ int ret;
+
+ /* Initialize TX buffers */
+ tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(size), 0,
+ rte_eth_dev_socket_id(port_id));
+ if (tx_buffer[port_id] == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx "
+ "on port %u\n", (unsigned int) port_id);
+
+ rte_eth_tx_buffer_init(tx_buffer[port_id], size);
+
+ ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
+ flush_tx_error_callback, (void *)(intptr_t)port_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot set error callback for "
+ "tx buffer on port %u\n", (unsigned int) port_id);
+}
+
+/*
+ * Set up output ports so that all traffic on a port gets sent out
+ * its paired port. Index using actual port numbers since that is
+ * what comes in the mbuf structure.
+ */
+static void
+configure_output_ports(const struct shared_info *info)
+{
+ int i;
+
+ if (info->num_ports > RTE_MAX_ETHPORTS)
+ rte_exit(EXIT_FAILURE, "Too many ethernet ports. "
+ "RTE_MAX_ETHPORTS = %u\n",
+ (unsigned int)RTE_MAX_ETHPORTS);
+ for (i = 0; i < info->num_ports - 1; i += 2) {
+ uint8_t p1 = info->id[i];
+ uint8_t p2 = info->id[i+1];
+
+ output_ports[p1] = p2;
+ output_ports[p2] = p1;
+
+ configure_tx_buffer(p1, MBQ_CAPACITY);
+ configure_tx_buffer(p2, MBQ_CAPACITY);
+
+ }
+}
+
+/*
+ * Create the hash table that will contain the flows that
+ * the node will handle, which will be used to decide if packet
+ * is transmitted or dropped.
+ */
+static struct rte_hash *
+create_hash_table(const struct shared_info *info)
+{
+ uint32_t num_flows_node = info->num_flows / info->num_nodes;
+ char name[RTE_HASH_NAMESIZE];
+ struct rte_hash *h;
+
+ /* create table */
+ struct rte_hash_parameters hash_params = {
+ .entries = num_flows_node * 2, /* table load = 50% */
+ .key_len = sizeof(uint32_t), /* Store IPv4 dest IP address */
+ .socket_id = rte_socket_id(),
+ .hash_func_init_val = 0,
+ };
+
+ snprintf(name, sizeof(name), "hash_table_%d", node_id);
+ hash_params.name = name;
+ h = rte_hash_create(&hash_params);
+
+ if (h == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Problem creating the hash table for node %d\n",
+ node_id);
+ return h;
+}
+
+static void
+populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
+{
+ unsigned int i;
+ int32_t ret;
+ uint32_t ip_dst;
+ uint32_t num_flows_node = 0;
+ uint64_t target_node;
+
+ /* Add flows in table */
+ for (i = 0; i < info->num_flows; i++) {
+ target_node = i % info->num_nodes;
+ if (target_node != node_id)
+ continue;
+
+ ip_dst = rte_cpu_to_be_32(i);
+
+ ret = rte_hash_add_key(h, (void *) &ip_dst);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Unable to add entry %u "
+ "in hash table\n", i);
+ else
+ num_flows_node++;
+
+ }
+
+ printf("Hash table: Adding 0x%x keys\n", num_flows_node);
+}
+
+/*
+ * This function performs routing of packets:
+ * it just sends each input packet out an output port based solely on
+ * the input port it arrived on.
+ */
+static inline void
+transmit_packet(struct rte_mbuf *buf)
+{
+ int sent;
+ const uint8_t in_port = buf->port;
+ const uint8_t out_port = output_ports[in_port];
+ struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];
+
+ sent = rte_eth_tx_buffer(out_port, node_id, buffer, buf);
+ if (sent)
+ tx_stats->tx[out_port] += sent;
+
+}
+
+static inline void
+handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t ipv4_dst_ip[PKT_READ_SIZE];
+ const void *key_ptrs[PKT_READ_SIZE];
+ unsigned int i;
+ int32_t positions[PKT_READ_SIZE] = {0};
+
+ for (i = 0; i < num_packets; i++) {
+ /* Handle IPv4 header.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
+ key_ptrs[i] = &ipv4_dst_ip[i];
+ }
+ /* Check if packets belong to any flows handled by this node */
+ rte_hash_lookup_bulk(h, key_ptrs, num_packets, positions);
+
+ for (i = 0; i < num_packets; i++) {
+ if (likely(positions[i] >= 0)) {
+ filter_stats->passed++;
+ transmit_packet(bufs[i]);
+ } else {
+ filter_stats->drop++;
+ /* Drop packet, as flow is not handled by this node */
+ rte_pktmbuf_free(bufs[i]);
+ }
+ }
+}
+
+/*
+ * Application main function - loops through
+ * receiving and processing packets. Never returns
+ */
+int
+main(int argc, char *argv[])
+{
+ const struct rte_memzone *mz;
+ struct rte_ring *rx_ring;
+ struct rte_hash *h;
+ struct rte_mempool *mp;
+ struct shared_info *info;
+ int need_flush = 0; /* indicates whether we have unsent packets */
+ int retval;
+ void *pkts[PKT_READ_SIZE];
+ uint16_t sent;
+
+ retval = rte_eal_init(argc, argv);
+ if (retval < 0)
+ return -1;
+ argc -= retval;
+ argv += retval;
+
+ if (parse_app_args(argc, argv) < 0)
+ rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
+
+ if (rte_eth_dev_count() == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+ rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
+ if (rx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
+ "is server process running?\n");
+
+ mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
+ if (mp == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
+
+ mz = rte_memzone_lookup(MZ_SHARED_INFO);
+ if (mz == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
+ info = mz->addr;
+ tx_stats = &(info->tx_stats[node_id]);
+ filter_stats = &(info->filter_stats[node_id]);
+
+ configure_output_ports(info);
+
+ h = create_hash_table(info);
+
+ populate_hash_table(h, info);
+
+ RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+ printf("\nNode process %d handling packets\n", node_id);
+ printf("[Press Ctrl-C to quit ...]\n");
+
+ for (;;) {
+ uint16_t rx_pkts = PKT_READ_SIZE;
+ uint8_t port;
+
+ /*
+ * Try dequeuing the maximum possible number of packets first;
+ * if that fails, get however many are currently available.
+ * The loop body should execute at most once.
+ */
+ while (rx_pkts > 0 &&
+ unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
+ rx_pkts, NULL) == 0))
+ rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
+ PKT_READ_SIZE);
+
+ if (unlikely(rx_pkts == 0)) {
+ if (need_flush)
+ for (port = 0; port < info->num_ports; port++) {
+ sent = rte_eth_tx_buffer_flush(
+ info->id[port],
+ node_id,
+ tx_buffer[port]);
+ if (unlikely(sent))
+ tx_stats->tx[port] += sent;
+ }
+ need_flush = 0;
+ continue;
+ }
+
+ handle_packets(h, (struct rte_mbuf **)pkts, rx_pkts);
+
+ need_flush = 1;
+ }
+}
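
The dequeue loop in `main()` above depends on the reworked 17.05 ring API: `rte_ring_dequeue_bulk()` is all-or-nothing, returns the number of objects actually dequeued (0 on failure) and takes a trailing `available` out-parameter (NULL when unused). When partial reads are acceptable, the burst variant expresses the same intent in one call; a hedged equivalent sketch:

    /* Sketch: burst dequeue takes whatever is there, 0..PKT_READ_SIZE. */
    unsigned int n = rte_ring_dequeue_burst(rx_ring, pkts,
                    PKT_READ_SIZE, NULL);
    if (n > 0)
            handle_packets(h, (struct rte_mbuf **)pkts, n);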
diff --git a/examples/dpdk_qat/Makefile b/examples/server_node_efd/server/Makefile
index 01d61bcf..a2f2f361 100644
--- a/examples/dpdk_qat/Makefile
+++ b/examples/server_node_efd/server/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -33,61 +33,25 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-ifeq ($(ICP_ROOT),)
-$(error "Please define ICP_ROOT environment variable")
-endif
-
-# Default target, can be overriden by command line or environment
+# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV), "linuxapp")
$(error This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
endif
-LBITS := $(shell uname -p)
-ifeq ($(CROSS_COMPILE),)
- ifneq ($(CONFIG_RTE_ARCH),"x86_64")
- ifneq ($(LBITS),i686)
- $(error The RTE_TARGET chosen is not compatible with this environment \
- (x86_64), for this application. Please change the definition of the \
- RTE_TARGET environment variable, or run the application on a i686 OS)
- endif
- endif
-endif
-
# binary name
-APP = dpdk_qat
+APP = server
# all source are stored in SRCS-y
-SRCS-y := main.c crypto.c
+SRCS-y := main.c init.c args.c
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -I$(ICP_ROOT)/quickassist/include \
- -I$(ICP_ROOT)/quickassist/include/lac \
- -I$(ICP_ROOT)/quickassist/lookaside/access_layer/include
+INC := $(wildcard *.h)
-# From CRF 1.2 driver, library was renamed to libicp_qa_al.a
-ifneq ($(wildcard $(ICP_ROOT)/build/icp_qa_al.a),)
-ICP_LIBRARY_PATH = $(ICP_ROOT)/build/icp_qa_al.a
-else
-ICP_LIBRARY_PATH = $(ICP_ROOT)/build/libicp_qa_al.a
-endif
-
-LDLIBS += -L$(ICP_ROOT)/build
-LDLIBS += $(ICP_LIBRARY_PATH) \
- -lz \
- -losal \
- -ladf_proxy \
- -lcrypto
-
-# workaround for a gcc bug with noreturn attribute
-# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
-ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
-CFLAGS_main.o += -Wno-return-type
-endif
+CFLAGS += $(WERROR_FLAGS) -O3
+CFLAGS += -I$(SRCDIR)/../shared
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/server_node_efd/server/args.c b/examples/server_node_efd/server/args.c
new file mode 100644
index 00000000..ee292038
--- /dev/null
+++ b/examples/server_node_efd/server/args.c
@@ -0,0 +1,200 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include <rte_memory.h>
+#include <rte_string_fns.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+
+/* 1M flows by default */
+#define DEFAULT_NUM_FLOWS 0x100000
+
+/* global var for number of nodes - extern in header */
+uint8_t num_nodes;
+/* global var for number of flows - extern in header */
+uint32_t num_flows = DEFAULT_NUM_FLOWS;
+
+static const char *progname;
+
+/**
+ * Prints out usage information to stdout
+ */
+static void
+usage(void)
+{
+ printf("%s [EAL options] -- -p PORTMASK -n NUM_NODES -f NUM_FLOWS\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to use\n"
+ " -n NUM_NODES: number of node processes to use\n"
+ " -f NUM_FLOWS: number of flows to be added in the EFD table\n",
+ progname);
+}
+
+/**
+ * The ports to be used by the application are passed in
+ * the form of a bitmask. This function parses the bitmask
+ * and places the port numbers to be used into the port[]
+ * array variable
+ */
+static int
+parse_portmask(uint8_t max_ports, const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+ uint8_t count = 0;
+
+ if (portmask == NULL || *portmask == '\0')
+ return -1;
+
+ /* convert parameter to a number and verify */
+ pm = strtoul(portmask, &end, 16);
+ if (end == NULL || *end != '\0' || pm == 0)
+ return -1;
+
+ /* loop through bits of the mask and mark ports */
+ while (pm != 0) {
+ if (pm & 0x01) { /* bit is set in mask, use port */
+ if (count >= max_ports)
+ printf("WARNING: requested port %u not present"
+ " - ignoring\n", (unsigned int)count);
+ else
+ info->id[info->num_ports++] = count;
+ }
+ pm = (pm >> 1);
+ count++;
+ }
+
+ return 0;
+}
+
+/**
+ * Take the number of nodes parameter passed to the app
+ * and convert to a number to store in the num_nodes variable
+ */
+static int
+parse_num_nodes(const char *nodes)
+{
+ char *end = NULL;
+ unsigned long temp;
+
+ if (nodes == NULL || *nodes == '\0')
+ return -1;
+
+ temp = strtoul(nodes, &end, 10);
+ if (end == NULL || *end != '\0' || temp == 0)
+ return -1;
+
+ num_nodes = (uint8_t)temp;
+ return 0;
+}
+
+static int
+parse_num_flows(const char *flows)
+{
+ char *end = NULL;
+
+ /* parse hexadecimal string */
+ num_flows = strtoul(flows, &end, 16);
+ if ((flows[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (num_flows == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * The application specific arguments follow the DPDK-specific
+ * arguments which are stripped by the DPDK init. This function
+ * processes these application arguments, printing usage info
+ * on error.
+ */
+int
+parse_app_args(uint8_t max_ports, int argc, char *argv[])
+{
+ int option_index, opt;
+ char **argvopt = argv;
+ static struct option lgopts[] = { /* no long options */
+ {NULL, 0, 0, 0 }
+ };
+ progname = argv[0];
+
+ while ((opt = getopt_long(argc, argvopt, "n:f:p:", lgopts,
+ &option_index)) != EOF) {
+ switch (opt) {
+ case 'p':
+ if (parse_portmask(max_ports, optarg) != 0) {
+ usage();
+ return -1;
+ }
+ break;
+ case 'n':
+ if (parse_num_nodes(optarg) != 0) {
+ usage();
+ return -1;
+ }
+ break;
+ case 'f':
+ if (parse_num_flows(optarg) != 0) {
+ usage();
+ return -1;
+ }
+ break;
+ default:
+ printf("ERROR: Unknown option '%c'\n", opt);
+ usage();
+ return -1;
+ }
+ }
+
+ if (info->num_ports == 0 || num_nodes == 0) {
+ usage();
+ return -1;
+ }
+
+ if (info->num_ports % 2 != 0) {
+ printf("ERROR: application requires an even "
+ "number of ports to use\n");
+ return -1;
+ }
+ return 0;
+}
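
A worked example of the portmask logic: `-p 0xA` (binary 1010) sets bits 1 and 3, so `parse_portmask()` fills `info->id[] = {1, 3}` and leaves `info->num_ports == 2`, which also satisfies the even-port check above. A hedged sketch of the call site (mirrored by init.c below):

    /* Sketch: "-p 0xA -n 2 -f 0x100000" -> ports 1 and 3, 2 nodes, 1M flows */
    if (parse_app_args(rte_eth_dev_count(), argc, argv) != 0)
            rte_exit(EXIT_FAILURE, "Invalid server command-line arguments\n");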
diff --git a/examples/server_node_efd/server/args.h b/examples/server_node_efd/server/args.h
new file mode 100644
index 00000000..cacf3957
--- /dev/null
+++ b/examples/server_node_efd/server/args.h
@@ -0,0 +1,39 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARGS_H_
+#define _ARGS_H_
+
+int parse_app_args(uint8_t max_ports, int argc, char *argv[]);
+
+#endif /* ifndef _ARGS_H_ */
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
new file mode 100644
index 00000000..82457b44
--- /dev/null
+++ b/examples/server_node_efd/server/init.c
@@ -0,0 +1,371 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_efd.h>
+#include <rte_hash.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+
+#define MBUFS_PER_NODE 1536
+#define MBUFS_PER_PORT 1536
+#define MBUF_CACHE_SIZE 512
+
+#define RTE_MP_RX_DESC_DEFAULT 512
+#define RTE_MP_TX_DESC_DEFAULT 512
+#define NODE_QUEUE_RINGSIZE 128
+
+#define NO_FLAGS 0
+
+/* The mbuf pool for packet rx */
+struct rte_mempool *pktmbuf_pool;
+
+/* array of info/queues for nodes */
+struct node *nodes;
+
+/* EFD table */
+struct rte_efd_table *efd_table;
+
+/* Shared info between server and nodes */
+struct shared_info *info;
+
+/**
+ * Initialise the mbuf pool for packet reception for the NIC, and any other
+ * buffer pools needed by the app - currently none.
+ */
+static int
+init_mbuf_pools(void)
+{
+ const unsigned int num_mbufs = (num_nodes * MBUFS_PER_NODE) +
+ (info->num_ports * MBUFS_PER_PORT);
+
+ /*
+ * Don't pass single-producer/single-consumer flags to mbuf create as it
+ * seems faster to use a cache instead
+ */
+ printf("Creating mbuf pool '%s' [%u mbufs] ...\n",
+ PKTMBUF_POOL_NAME, num_mbufs);
+ pktmbuf_pool = rte_pktmbuf_pool_create(PKTMBUF_POOL_NAME, num_mbufs,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ return pktmbuf_pool == NULL; /* 0 on success */
+}
+
+/**
+ * Initialise an individual port:
+ * - configure number of rx and tx rings
+ * - set up each rx ring, to pull from the main mbuf pool
+ * - set up each tx ring
+ * - start the port and report its status to stdout
+ */
+static int
+init_port(uint8_t port_num)
+{
+ /* for port configuration all features are off by default */
+ const struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS
+ }
+ };
+ const uint16_t rx_rings = 1, tx_rings = num_nodes;
+ const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
+ const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
+
+ uint16_t q;
+ int retval;
+
+ printf("Port %u init ... ", (unsigned int)port_num);
+ fflush(stdout);
+
+ /*
+ * Standard DPDK port initialisation - config port, then set up
+ * rx and tx rings.
+ */
+ retval = rte_eth_dev_configure(port_num, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
+ rte_eth_dev_socket_id(port_num),
+ NULL, pktmbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
+ rte_eth_dev_socket_id(port_num),
+ NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ rte_eth_promiscuous_enable(port_num);
+
+ retval = rte_eth_dev_start(port_num);
+ if (retval < 0)
+ return retval;
+
+ printf("done:\n");
+
+ return 0;
+}
+
+/**
+ * Set up the DPDK rings which will be used to pass packets, via
+ * pointers, between the multi-process server and node processes.
+ * Each node needs one RX queue.
+ */
+static int
+init_shm_rings(void)
+{
+ unsigned int i;
+ unsigned int socket_id;
+ const char *q_name;
+ const unsigned int ringsize = NODE_QUEUE_RINGSIZE;
+
+ nodes = rte_malloc("node details",
+ sizeof(*nodes) * num_nodes, 0);
+ if (nodes == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate memory for "
+ "node program details\n");
+
+ for (i = 0; i < num_nodes; i++) {
+ /* Create an RX queue for each node */
+ socket_id = rte_socket_id();
+ q_name = get_rx_queue_name(i);
+ nodes[i].rx_q = rte_ring_create(q_name,
+ ringsize, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (nodes[i].rx_q == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create rx ring queue "
+ "for node %u\n", i);
+ }
+ return 0;
+}
+
+/*
+ * Create EFD table which will contain all the flows
+ * that will be distributed among the nodes
+ */
+static void
+create_efd_table(void)
+{
+ uint8_t socket_id = rte_socket_id();
+
+ /* create table */
+ efd_table = rte_efd_create("flow table", num_flows * 2, sizeof(uint32_t),
+ 1 << socket_id, socket_id);
+
+ if (efd_table == NULL)
+ rte_exit(EXIT_FAILURE, "Problem creating the flow table\n");
+}
+
+static void
+populate_efd_table(void)
+{
+ unsigned int i;
+ int32_t ret;
+ uint32_t ip_dst;
+ uint8_t socket_id = rte_socket_id();
+ uint64_t node_id;
+
+ /* Add flows in table */
+ for (i = 0; i < num_flows; i++) {
+ node_id = i % num_nodes;
+
+ ip_dst = rte_cpu_to_be_32(i);
+ ret = rte_efd_update(efd_table, socket_id,
+ (void *)&ip_dst, (efd_value_t)node_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Unable to add entry %u in "
+ "EFD table\n", i);
+ }
+
+ printf("EFD table: Adding 0x%x keys\n", num_flows);
+}
+
+/*
+ * Check the link status of all ports for up to 9 seconds,
+ * then print the final status of each port.
+ */
+static void
+check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint8_t portid, count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ for (portid = 0; portid < port_num; portid++) {
+ if ((port_mask & (1 << info->id[portid])) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(info->id[portid], &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf("Port %d Link Up - speed %u "
+ "Mbps - %s\n", info->id[portid],
+ (unsigned int)link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ printf("Port %d Link Down\n",
+ (uint8_t)info->id[portid]);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+/**
+ * Main init function for the multi-process server app,
+ * calls subfunctions to do each stage of the initialisation.
+ */
+int
+init(int argc, char *argv[])
+{
+ int retval;
+ const struct rte_memzone *mz;
+ uint8_t i, total_ports;
+
+ /* init EAL, parsing EAL args */
+ retval = rte_eal_init(argc, argv);
+ if (retval < 0)
+ return -1;
+ argc -= retval;
+ argv += retval;
+
+ /* get total number of ports */
+ total_ports = rte_eth_dev_count();
+
+ /* set up array for port data */
+ mz = rte_memzone_reserve(MZ_SHARED_INFO, sizeof(*info),
+ rte_socket_id(), NO_FLAGS);
+ if (mz == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot reserve memory zone "
+ "for port information\n");
+ memset(mz->addr, 0, sizeof(*info));
+ info = mz->addr;
+
+ /* parse additional, application arguments */
+ retval = parse_app_args(total_ports, argc, argv);
+ if (retval != 0)
+ return -1;
+
+ /* initialise mbuf pools */
+ retval = init_mbuf_pools();
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");
+
+ /* now initialise the ports we will use */
+ for (i = 0; i < info->num_ports; i++) {
+ retval = init_port(info->id[i]);
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
+ (unsigned int) i);
+ }
+
+ check_all_ports_link_status(info->num_ports, (~0x0));
+
+ /* initialise the node queues/rings for inter-process comms */
+ init_shm_rings();
+
+ /* Create the EFD table */
+ create_efd_table();
+
+ /* Populate the EFD table */
+ populate_efd_table();
+
+ /* Share the total number of nodes */
+ info->num_nodes = num_nodes;
+
+ /* Share the total number of flows */
+ info->num_flows = num_flows;
+ return 0;
+}
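
For reference, `rte_efd_create()` above sizes the table at `num_flows * 2` entries for headroom, uses 4-byte keys (the big-endian IPv4 destination address), and passes `1 << socket_id` as the bitmask of sockets that will perform online lookups. A hedged single-key round trip using the same key and value types as init.c:

    /* Sketch: insert one flow key and read it back. */
    uint32_t key = rte_cpu_to_be_32(42);     /* dst IP 0.0.0.42 */
    efd_value_t value;

    if (rte_efd_update(efd_table, socket_id, &key, (efd_value_t)1) < 0)
            rte_exit(EXIT_FAILURE, "EFD insert failed\n");

    value = rte_efd_lookup(efd_table, socket_id, &key);
    /* EFD stores no keys, so lookups of never-inserted keys can return
     * any value; the server's range check on node ids covers that case. */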
diff --git a/examples/dpdk_qat/crypto.h b/examples/server_node_efd/server/init.h
index f68b0b65..8dc5885b 100644
--- a/examples/dpdk_qat/crypto.h
+++ b/examples/server_node_efd/server/init.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,60 +31,46 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CRYPTO_H_
-#define CRYPTO_H_
+#ifndef _INIT_H_
+#define _INIT_H_
-/* Pass Labels/Values to crypto units */
-enum cipher_alg {
- /* Option to not do any cryptography */
- NO_CIPHER,
- CIPHER_DES,
- CIPHER_DES_CBC,
- CIPHER_DES3,
- CIPHER_DES3_CBC,
- CIPHER_AES,
- CIPHER_AES_CBC_128,
- CIPHER_KASUMI_F8,
- NUM_CRYPTO,
-};
-
-enum hash_alg {
- /* Option to not do any hash */
- NO_HASH,
- HASH_MD5,
- HASH_SHA1,
- HASH_SHA1_96,
- HASH_SHA224,
- HASH_SHA256,
- HASH_SHA384,
- HASH_SHA512,
- HASH_AES_XCBC,
- HASH_AES_XCBC_96,
- HASH_KASUMI_F9,
- NUM_HMAC,
-};
-/* Return value from crypto_{encrypt/decrypt} */
-enum crypto_result {
- /* Packet was successfully put into crypto queue */
- CRYPTO_RESULT_IN_PROGRESS,
- /* Cryptography has failed in some way */
- CRYPTO_RESULT_FAIL,
+/*
+ * Define a node structure with all needed info, including
+ * stats from the nodes.
+ */
+struct node {
+ struct rte_ring *rx_q;
+ unsigned int node_id;
+ /* these stats hold how many packets the node will actually receive,
+ * and how many packets were dropped because the node's queue was full.
+ * The port-info stats, in contrast, record how many packets were received
+ * or transmitted on an actual NIC port.
+ */
+ struct {
+ uint64_t rx;
+ uint64_t rx_drop;
+ } stats;
};
-extern enum crypto_result crypto_encrypt(struct rte_mbuf *pkt, enum cipher_alg c,
- enum hash_alg h);
-extern enum crypto_result crypto_decrypt(struct rte_mbuf *pkt, enum cipher_alg c,
- enum hash_alg h);
-
-extern int crypto_init(void);
+extern struct rte_efd_table *efd_table;
+extern struct node *nodes;
-extern int per_core_crypto_init(uint32_t lcore_id);
-
-extern void crypto_exit(void);
+/*
+ * shared information between server and nodes: number of nodes,
+ * port numbers, rx and tx stats etc.
+ */
+extern struct shared_info *info;
-extern void *crypto_get_next_response(void);
+extern struct rte_mempool *pktmbuf_pool;
+extern uint8_t num_nodes;
+extern unsigned int num_sockets;
+extern uint32_t num_flows;
-extern void crypto_flush_tx_queue(uint32_t lcore_id);
+int init(int argc, char *argv[]);
-#endif /* CRYPTO_H_ */
+#endif /* ifndef _INIT_H_ */
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
new file mode 100644
index 00000000..597b4c25
--- /dev/null
+++ b/examples/server_node_efd/server/main.c
@@ -0,0 +1,362 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <netinet/ip.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_atomic.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_mempool.h>
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_efd.h>
+#include <rte_ip.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+
+/*
+ * When doing reads from the NIC or the node queues,
+ * use this batch size
+ */
+#define PACKET_READ_SIZE 32
+
+/*
+ * Local buffers to put packets in, used to send packets in bursts to the
+ * nodes
+ */
+struct node_rx_buf {
+ struct rte_mbuf *buffer[PACKET_READ_SIZE];
+ uint16_t count;
+};
+
+struct efd_stats {
+ uint64_t distributed;
+ uint64_t drop;
+} flow_dist_stats;
+
+/* One buffer per node rx queue - dynamically allocate array */
+static struct node_rx_buf *cl_rx_buf;
+
+static const char *
+get_printable_mac_addr(uint8_t port)
+{
+ static const char err_address[] = "00:00:00:00:00:00";
+ static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
+ struct ether_addr mac;
+
+ if (unlikely(port >= RTE_MAX_ETHPORTS))
+ return err_address;
+ if (unlikely(addresses[port][0] == '\0')) {
+ rte_eth_macaddr_get(port, &mac);
+ snprintf(addresses[port], sizeof(addresses[port]),
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
+ }
+ return addresses[port];
+}
+
+/*
+ * This function displays the recorded statistics for each port
+ * and for each node. It uses ANSI terminal codes to clear
+ * screen when called. It is called from a single non-master
+ * thread in the server process, when the process is run with more
+ * than one lcore enabled.
+ */
+static void
+do_stats_display(void)
+{
+ unsigned int i, j;
+ const char clr[] = {27, '[', '2', 'J', '\0'};
+ const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
+ uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
+ uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];
+
+ /* to get TX stats, we need to do some summing calculations */
+ memset(port_tx, 0, sizeof(port_tx));
+ memset(port_tx_drop, 0, sizeof(port_tx_drop));
+ memset(node_tx, 0, sizeof(node_tx));
+ memset(node_tx_drop, 0, sizeof(node_tx_drop));
+
+ for (i = 0; i < num_nodes; i++) {
+ const struct tx_stats *tx = &info->tx_stats[i];
+
+ for (j = 0; j < info->num_ports; j++) {
+ const uint64_t tx_val = tx->tx[info->id[j]];
+ const uint64_t drop_val = tx->tx_drop[info->id[j]];
+
+ port_tx[j] += tx_val;
+ port_tx_drop[j] += drop_val;
+ node_tx[i] += tx_val;
+ node_tx_drop[i] += drop_val;
+ }
+ }
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("PORTS\n");
+ printf("-----\n");
+ for (i = 0; i < info->num_ports; i++)
+ printf("Port %u: '%s'\t", (unsigned int)info->id[i],
+ get_printable_mac_addr(info->id[i]));
+ printf("\n\n");
+ for (i = 0; i < info->num_ports; i++) {
+ printf("Port %u - rx: %9"PRIu64"\t"
+ "tx: %9"PRIu64"\n",
+ (unsigned int)info->id[i], info->rx_stats.rx[i],
+ port_tx[i]);
+ }
+
+ printf("\nSERVER\n");
+ printf("-----\n");
+ printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
+ flow_dist_stats.distributed, flow_dist_stats.drop);
+
+ printf("\nNODES\n");
+ printf("-------\n");
+ for (i = 0; i < num_nodes; i++) {
+ const unsigned long long rx = nodes[i].stats.rx;
+ const unsigned long long rx_drop = nodes[i].stats.rx_drop;
+ const struct filter_stats *filter = &info->filter_stats[i];
+
+ printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
+ " tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
+ " filter_passed: %9"PRIu64", "
+ "filter_drop: %9"PRIu64"\n",
+ i, rx, rx_drop, node_tx[i], node_tx_drop[i],
+ filter->passed, filter->drop);
+ }
+
+ printf("\n");
+}
+
+/*
+ * The function called from each non-master lcore used by the process.
+ * The test_and_set function is used to randomly pick a single lcore on which
+ * the code to display the statistics will run. Otherwise, the code just
+ * repeatedly sleeps.
+ */
+static int
+sleep_lcore(__attribute__((unused)) void *dummy)
+{
+ /* Used to pick a display thread - static, so zero-initialised */
+ static rte_atomic32_t display_stats;
+
+ /* Only one core should display stats */
+ if (rte_atomic32_test_and_set(&display_stats)) {
+ const unsigned int sleeptime = 1;
+
+ printf("Core %u displaying statistics\n", rte_lcore_id());
+
+ /* Longer initial pause so above printf is seen */
+ sleep(sleeptime * 3);
+
+ /* Loop forever: sleep always returns 0 or <= param */
+ while (sleep(sleeptime) <= sleeptime)
+ do_stats_display();
+ }
+ return 0;
+}
+
+/*
+ * Function to set all the node statistic values to zero.
+ * Called at program startup.
+ */
+static void
+clear_stats(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_nodes; i++)
+ nodes[i].stats.rx = nodes[i].stats.rx_drop = 0;
+}
+
+/*
+ * send a burst of traffic to a node, assuming there are packets
+ * available to be sent to this node
+ */
+static void
+flush_rx_queue(uint16_t node)
+{
+ uint16_t j;
+ struct node *cl;
+
+ if (cl_rx_buf[node].count == 0)
+ return;
+
+ cl = &nodes[node];
+ if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
+ for (j = 0; j < cl_rx_buf[node].count; j++)
+ rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
+ cl->stats.rx_drop += cl_rx_buf[node].count;
+ } else
+ cl->stats.rx += cl_rx_buf[node].count;
+
+ cl_rx_buf[node].count = 0;
+}
+
+/*
+ * Queue a packet to be sent to a particular node process.
+ */
+static inline void
+enqueue_rx_packet(uint8_t node, struct rte_mbuf *buf)
+{
+ cl_rx_buf[node].buffer[cl_rx_buf[node].count++] = buf;
+}
+
+/*
+ * This function takes a group of packets and routes them
+ * individually to a node process, based on an EFD lookup of
+ * each packet's IPv4 destination address.
+ */
+static void
+process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
+ uint16_t rx_count, unsigned int socket_id)
+{
+ uint16_t i;
+ uint8_t node;
+ efd_value_t data[RTE_EFD_BURST_MAX];
+ const void *key_ptrs[RTE_EFD_BURST_MAX];
+
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];
+
+ for (i = 0; i < rx_count; i++) {
+		/* Extract the IPv4 destination address as the flow key. */
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
+ key_ptrs[i] = (void *)&ipv4_dst_ip[i];
+ }
+
+ rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
+ (const void **) key_ptrs, data);
+ for (i = 0; i < rx_count; i++) {
+ node = (uint8_t) ((uintptr_t)data[i]);
+
+ if (node >= num_nodes) {
+			/*
+			 * Node is out of range, which means the
+			 * flow has not been inserted into the
+			 * EFD table.
+			 */
+ flow_dist_stats.drop++;
+ rte_pktmbuf_free(pkts[i]);
+ } else {
+ flow_dist_stats.distributed++;
+ enqueue_rx_packet(node, pkts[i]);
+ }
+ }
+
+ for (i = 0; i < num_nodes; i++)
+ flush_rx_queue(i);
+}
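
The lookup above only resolves flows the server has previously added. As a hedged sketch, populating the table might look like the following, assuming a table built with rte_efd_create() and 32-bit IPv4 destination keys (add_flow() is an illustrative helper, not part of this example):

    #include <stdint.h>
    #include <rte_efd.h>

    /* Map one IPv4 destination address to the node that should handle it. */
    static int
    add_flow(struct rte_efd_table *table, unsigned int socket_id,
            uint32_t ipv4_dst, uint8_t node_id)
    {
        /* rte_efd_update() inserts the key, or updates it if present. */
        return rte_efd_update(table, socket_id, (void *)&ipv4_dst,
                (efd_value_t)node_id);
    }
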
+
+/*
+ * Function called by the master lcore of the DPDK process.
+ */
+static void
+do_packet_forwarding(void)
+{
+ unsigned int port_num = 0; /* indexes the port[] array */
+ unsigned int socket_id = rte_socket_id();
+
+ for (;;) {
+ struct rte_mbuf *buf[PACKET_READ_SIZE];
+ uint16_t rx_count;
+
+ /* read a port */
+ rx_count = rte_eth_rx_burst(info->id[port_num], 0,
+ buf, PACKET_READ_SIZE);
+ info->rx_stats.rx[port_num] += rx_count;
+
+ /* Now process the NIC packets read */
+ if (likely(rx_count > 0))
+ process_packets(port_num, buf, rx_count, socket_id);
+
+ /* move to next port */
+ if (++port_num == info->num_ports)
+ port_num = 0;
+ }
+}
+
+int
+main(int argc, char *argv[])
+{
+ /* initialise the system */
+ if (init(argc, argv) < 0)
+ return -1;
+ RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+ cl_rx_buf = calloc(num_nodes, sizeof(cl_rx_buf[0]));
+
+ /* clear statistics */
+ clear_stats();
+
+	/* Put all cores other than the master to sleep */
+ rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);
+
+ do_packet_forwarding();
+ return 0;
+}
diff --git a/examples/server_node_efd/shared/common.h b/examples/server_node_efd/shared/common.h
new file mode 100644
index 00000000..8a134799
--- /dev/null
+++ b/examples/server_node_efd/shared/common.h
@@ -0,0 +1,99 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <rte_hash_crc.h>
+#include <rte_hash.h>
+
+#define MAX_NODES 16
+/*
+ * Shared port info, including statistics information for display by server.
+ * Structure will be put in a memzone.
+ * - All port id values share one cache line as this data will be read-only
+ * during operation.
+ * - All rx statistic values share cache lines, as this data is written only
+ * by the server process (and only rarely read, by the stats display).
+ * - The tx statistics are written by the nodes, so each node gets its own
+ * cache-aligned copy covering all ports, keeping the nodes from falsely
+ * sharing cache lines with each other.
+ */
+struct rx_stats {
+ uint64_t rx[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct tx_stats {
+ uint64_t tx[RTE_MAX_ETHPORTS];
+ uint64_t tx_drop[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct filter_stats {
+ uint64_t drop;
+ uint64_t passed;
+} __rte_cache_aligned;
+
+struct shared_info {
+ uint8_t num_nodes;
+ uint8_t num_ports;
+ uint32_t num_flows;
+ uint8_t id[RTE_MAX_ETHPORTS];
+ struct rx_stats rx_stats;
+ struct tx_stats tx_stats[MAX_NODES];
+ struct filter_stats filter_stats[MAX_NODES];
+};
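
Given this layout, each writer stays on its own cache lines: the server is the sole writer of rx_stats, and node n writes only tx_stats[n] and filter_stats[n]. A brief sketch of the intended write pattern (info, node_id, port, and the counters are assumed to be in scope):

    /* Server side: sole writer of the rx statistics. */
    info->rx_stats.rx[port] += rx_count;

    /* Node side: node 'node_id' touches only its own cache-aligned slots. */
    info->tx_stats[node_id].tx[port] += tx_count;
    info->tx_stats[node_id].tx_drop[port] += drop_count;
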
+
+/* define common names for structures shared between server and node */
+#define MP_NODE_RXQ_NAME "MProc_Node_%u_RX"
+#define PKTMBUF_POOL_NAME "MProc_pktmbuf_pool"
+#define MZ_SHARED_INFO "MProc_shared_info"
+
+/*
+ * Given the rx queue name template above, get the queue name
+ */
+static inline const char *
+get_rx_queue_name(unsigned int id)
+{
+	/*
+	 * Buffer for the return value. Sized so that the "%u" in the
+	 * template can expand to at most 3 digits (plus an extra byte
+	 * for safety).
+	 */
+ static char buffer[sizeof(MP_NODE_RXQ_NAME) + 2];
+
+ snprintf(buffer, sizeof(buffer) - 1, MP_NODE_RXQ_NAME, id);
+ return buffer;
+}
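
Since both processes derive object names from these shared templates, a secondary (node) process can attach to what the server created. A minimal node-side sketch, assuming the server process has already set the objects up and node_id is the node's index:

    #include <stdlib.h>
    #include <rte_memzone.h>
    #include <rte_ring.h>
    #include <rte_debug.h>

    const struct rte_memzone *mz = rte_memzone_lookup(MZ_SHARED_INFO);
    struct rte_ring *rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));

    if (mz == NULL || rx_ring == NULL)
        rte_exit(EXIT_FAILURE, "Cannot locate server-created objects\n");
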
+
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+
+#endif
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index 1d6d4635..cd6e3f1c 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -49,7 +49,7 @@
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include "main.h"
#include "vxlan.h"
@@ -68,7 +68,7 @@
(nb_switching_cores * MBUF_CACHE_SIZE))
#define MBUF_CACHE_SIZE 128
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@@ -567,7 +567,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
unsigned len, ret = 0;
const uint16_t lcore_id = rte_lcore_id();
- RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
vdev->vid);
/* Add packet to the port tx queue */
@@ -649,7 +649,7 @@ switch_worker(__rte_unused void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- RTE_LOG(DEBUG, VHOST_DATA, "TX queue drained after "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "TX queue drained after "
"timeout with burst size %u\n",
tx_q->len);
ret = overlay_options.tx_handle(ports[0],
@@ -1081,7 +1081,7 @@ new_device(int vid)
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
*/
-static const struct virtio_net_device_ops virtio_net_device_ops = {
+static const struct vhost_device_ops virtio_net_device_ops = {
.new_device = new_device,
.destroy_device = destroy_device,
};
@@ -1199,15 +1199,13 @@ main(int argc, char *argv[])
MAX_SUP_PORTS);
}
/* Create the mbuf pool. */
- mbuf_pool = rte_mempool_create(
+ mbuf_pool = rte_pktmbuf_pool_create(
"MBUF_POOL",
- NUM_MBUFS_PER_PORT
- * valid_nb_ports,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ NUM_MBUFS_PER_PORT * valid_nb_ports,
+ MBUF_CACHE_SIZE,
+ 0,
+ MBUF_DATA_SIZE,
+ rte_socket_id());
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
@@ -1250,15 +1248,28 @@ main(int argc, char *argv[])
rte_eal_remote_launch(switch_worker,
mbuf_pool, lcore_id);
}
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
ret = rte_vhost_driver_register((char *)&dev_basename, 0);
if (ret != 0)
rte_exit(EXIT_FAILURE, "failed to register vhost driver.\n");
- rte_vhost_driver_callback_register(&virtio_net_device_ops);
+ rte_vhost_driver_disable_features(dev_basename,
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF);
- rte_vhost_driver_session_start();
+ ret = rte_vhost_driver_callback_register(dev_basename,
+ &virtio_net_device_ops);
+ if (ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to register vhost driver callbacks.\n");
+ }
+
+ if (rte_vhost_driver_start(dev_basename) < 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to start vhost driver.\n");
+ }
+
+ RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ rte_eal_wait_lcore(lcore_id);
return 0;
}
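
The hunk above captures the 17.05 vhost API rework: feature toggles and callback registration are now per-socket operations keyed by the socket path, and the blocking rte_vhost_driver_session_start() is replaced by a non-blocking rte_vhost_driver_start() plus an explicit wait on the worker lcores. Condensed, the new sequence (with path and ops standing in for this example's dev_basename and ops struct) is:

    if (rte_vhost_driver_register(path, 0) != 0)
        rte_exit(EXIT_FAILURE, "vhost driver register failure\n");

    rte_vhost_driver_disable_features(path, 1ULL << VIRTIO_NET_F_MRG_RXBUF);

    if (rte_vhost_driver_callback_register(path, &ops) != 0)
        rte_exit(EXIT_FAILURE, "callback register failure\n");

    /* Non-blocking: spawns the vhost-user session thread. */
    if (rte_vhost_driver_start(path) < 0)
        rte_exit(EXIT_FAILURE, "vhost driver start failure\n");
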
diff --git a/examples/tep_termination/main.h b/examples/tep_termination/main.h
index c0ea7667..8ed817d4 100644
--- a/examples/tep_termination/main.h
+++ b/examples/tep_termination/main.h
@@ -54,6 +54,8 @@
/* Max number of devices. Limited by the application. */
#define MAX_DEVICES 64
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
/* Per-device statistics struct */
struct device_statistics {
uint64_t tx_total;
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 8f1f15bb..b57c0451 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -49,7 +49,7 @@
#include <rte_tcp.h>
#include "main.h"
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
#include "vxlan.h"
#include "vxlan_setup.h"
@@ -102,7 +102,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/vhost/Makefile b/examples/vhost/Makefile
index e95c68ae..af7be99a 100644
--- a/examples/vhost/Makefile
+++ b/examples/vhost/Makefile
@@ -48,7 +48,7 @@ else
APP = vhost-switch
# all source are stored in SRCS-y
-SRCS-y := main.c
+SRCS-y := main.c virtio_net.c
CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index eddaf926..e07f8669 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,7 @@
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
@@ -65,7 +65,6 @@
#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
@@ -129,6 +128,8 @@ static uint32_t enable_tso;
static int client_mode;
static int dequeue_zero_copy;
+static int builtin_net_driver;
+
/* Specify timeout (in useconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
@@ -153,7 +154,7 @@ static struct rte_eth_conf vmdq_conf_default = {
*/
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
@@ -328,16 +329,6 @@ port_init(uint8_t port)
if (port >= rte_eth_dev_count()) return -1;
- if (enable_tx_csum == 0)
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
-
- if (enable_tso == 0) {
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
- }
-
rx_rings = (uint16_t)dev_info.max_rx_queues;
/* Configure ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -405,7 +396,7 @@ static int
us_vhost_parse_socket_path(const char *q_arg)
{
/* parse number string */
- if (strnlen(q_arg, PATH_MAX) > PATH_MAX)
+ if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
return -1;
socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
@@ -509,6 +500,7 @@ us_vhost_parse_args(int argc, char **argv)
{"tso", required_argument, NULL, 0},
{"client", no_argument, &client_mode, 1},
{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
+ {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
{NULL, 0, 0, 0},
};
@@ -531,7 +523,6 @@ us_vhost_parse_args(int argc, char **argv)
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
ETH_VMDQ_ACCEPT_BROADCAST |
ETH_VMDQ_ACCEPT_MULTICAST;
- rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
break;
@@ -806,7 +797,12 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
{
uint16_t ret;
- ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+ if (builtin_net_driver) {
+ ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
+ } else {
+ ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+ }
+
if (enable_stats) {
rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
@@ -832,17 +828,17 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
return -1;
if (vdev->vid == dst_vdev->vid) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
vdev->vid);
return 0;
}
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: MAC address is local\n", dst_vdev->vid);
if (unlikely(dst_vdev->remove)) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) device is marked for removal\n", dst_vdev->vid);
return 0;
}
@@ -867,7 +863,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
return 0;
if (vdev->vid == dst_vdev->vid) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
vdev->vid);
return -1;
@@ -881,7 +877,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
*offset = VLAN_HLEN;
*vlan_tag = vlan_tags[vdev->vid];
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
vdev->vid, dst_vdev->vid, *vlan_tag);
@@ -973,7 +969,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
}
}
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: MAC address is external\n", vdev->vid);
queue2nic:
@@ -1041,7 +1037,7 @@ drain_mbuf_table(struct mbuf_table *tx_q)
if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
prev_tsc = cur_tsc;
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"TX queue drained after timeout with burst size %u\n",
tx_q->len);
do_drain_mbuf_table(tx_q);
@@ -1077,8 +1073,13 @@ drain_eth_rx(struct vhost_dev *vdev)
}
}
- enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+ if (builtin_net_driver) {
+ enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
+ pkts, rx_count);
+ } else {
+ enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
pkts, rx_count);
+ }
if (enable_stats) {
rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
@@ -1094,8 +1095,13 @@ drain_virtio_tx(struct vhost_dev *vdev)
uint16_t count;
uint16_t i;
- count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
+ if (builtin_net_driver) {
+ count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
pkts, MAX_PKT_BURST);
+ } else {
+ count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
+ mbuf_pool, pkts, MAX_PKT_BURST);
+ }
/* setup VMDq for the first packet */
if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
@@ -1198,6 +1204,9 @@ destroy_device(int vid)
rte_pause();
}
+ if (builtin_net_driver)
+ vs_vhost_net_remove(vdev);
+
TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
lcore_vdev_entry);
TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
@@ -1246,6 +1255,9 @@ new_device(int vid)
}
vdev->vid = vid;
+ if (builtin_net_driver)
+ vs_vhost_net_setup(vdev);
+
TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
@@ -1281,7 +1293,7 @@ new_device(int vid)
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
*/
-static const struct virtio_net_device_ops virtio_net_device_ops =
+static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
@@ -1509,9 +1521,6 @@ main(int argc, char *argv[])
RTE_LCORE_FOREACH_SLAVE(lcore_id)
rte_eal_remote_launch(switch_worker, NULL, lcore_id);
- if (mergeable == 0)
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
-
if (client_mode)
flags |= RTE_VHOST_USER_CLIENT;
@@ -1520,18 +1529,59 @@ main(int argc, char *argv[])
/* Register vhost user driver to handle vhost messages. */
for (i = 0; i < nb_sockets; i++) {
- ret = rte_vhost_driver_register
- (socket_files + i * PATH_MAX, flags);
+ char *file = socket_files + i * PATH_MAX;
+ ret = rte_vhost_driver_register(file, flags);
if (ret != 0) {
unregister_drivers(i);
rte_exit(EXIT_FAILURE,
"vhost driver register failure.\n");
}
+
+ if (builtin_net_driver)
+ rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
+
+ if (mergeable == 0) {
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF);
+ }
+
+ if (enable_tx_csum == 0) {
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_CSUM);
+ }
+
+ if (enable_tso == 0) {
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_HOST_TSO4);
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_HOST_TSO6);
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_GUEST_TSO4);
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ }
+
+ if (promiscuous) {
+ rte_vhost_driver_enable_features(file,
+ 1ULL << VIRTIO_NET_F_CTRL_RX);
+ }
+
+ ret = rte_vhost_driver_callback_register(file,
+ &virtio_net_device_ops);
+ if (ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to register vhost driver callbacks.\n");
+ }
+
+ if (rte_vhost_driver_start(file) < 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to start vhost driver.\n");
+ }
}
- rte_vhost_driver_callback_register(&virtio_net_device_ops);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ rte_eal_wait_lcore(lcore_id);
- rte_vhost_driver_session_start();
return 0;
}
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 6bb42e89..9a2aca37 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,11 +36,17 @@
#include <sys/queue.h>
+#include <rte_ether.h>
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
+#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
+
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
@@ -48,6 +54,12 @@ struct device_statistics {
rte_atomic64_t rx_total_atomic;
};
+struct vhost_queue {
+ struct rte_vhost_vring vr;
+ uint16_t last_avail_idx;
+ uint16_t last_used_idx;
+};
+
struct vhost_dev {
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
@@ -65,9 +77,16 @@ struct vhost_dev {
volatile uint8_t remove;
int vid;
+ uint64_t features;
+ size_t hdr_len;
+ uint16_t nr_vrings;
+ struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;
+
+#define MAX_QUEUE_PAIRS 4
+ struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
} __rte_cache_aligned;
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
@@ -88,4 +107,15 @@ struct lcore_info {
struct vhost_dev_tailq_list vdev_list;
};
+/* we implement non-extra virtio net features */
+#define VIRTIO_NET_FEATURES 0
+
+void vs_vhost_net_setup(struct vhost_dev *dev);
+void vs_vhost_net_remove(struct vhost_dev *dev);
+uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+
+uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
#endif /* _MAIN_H_ */
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
new file mode 100644
index 00000000..cc2c3d88
--- /dev/null
+++ b/examples/vhost/virtio_net.c
@@ -0,0 +1,403 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/virtio_net.h>
+
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_vhost.h>
+
+#include "main.h"
+
+/*
+ * A very simple vhost-user net driver implementation, with none of
+ * the optional features enabled, such as TSO or mergeable Rx buffers.
+ */
+
+void
+vs_vhost_net_setup(struct vhost_dev *dev)
+{
+ uint16_t i;
+ int vid = dev->vid;
+ struct vhost_queue *queue;
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "setting builtin vhost-user net driver\n");
+
+ rte_vhost_get_negotiated_features(vid, &dev->features);
+ if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
+ dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ dev->hdr_len = sizeof(struct virtio_net_hdr);
+
+ rte_vhost_get_mem_table(vid, &dev->mem);
+
+ dev->nr_vrings = rte_vhost_get_vring_num(vid);
+ for (i = 0; i < dev->nr_vrings; i++) {
+ queue = &dev->queues[i];
+
+ queue->last_used_idx = 0;
+ queue->last_avail_idx = 0;
+ rte_vhost_get_vhost_vring(vid, i, &queue->vr);
+ }
+}
+
+void
+vs_vhost_net_remove(struct vhost_dev *dev)
+{
+ free(dev->mem);
+}
+
+static inline int __attribute__((always_inline))
+enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
+ struct rte_mbuf *m, uint16_t desc_idx)
+{
+ uint32_t desc_avail, desc_offset;
+ uint32_t mbuf_avail, mbuf_offset;
+ uint32_t cpy_len;
+ struct vring_desc *desc;
+ uint64_t desc_addr;
+ struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
+	/* A counter to guard against a looping (malformed) desc chain */
+ uint16_t nr_desc = 1;
+
+ desc = &vr->desc[desc_idx];
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ /*
+ * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
+ * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
+ * otherwise stores offset on the stack instead of in a register.
+ */
+ if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
+ return -1;
+
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ /* write virtio-net header */
+ *(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
+
+ desc_offset = dev->hdr_len;
+ desc_avail = desc->len - dev->hdr_len;
+
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_offset = 0;
+ while (mbuf_avail != 0 || m->next != NULL) {
+ /* done with current mbuf, fetch next */
+ if (mbuf_avail == 0) {
+ m = m->next;
+
+ mbuf_offset = 0;
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ }
+
+ /* done with current desc buf, fetch next */
+ if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
+				/* Not enough room in the vring buffer */
+ return -1;
+ }
+ if (unlikely(desc->next >= vr->size ||
+ ++nr_desc > vr->size))
+ return -1;
+
+ desc = &vr->desc[desc->next];
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ }
+
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+ }
+
+ return 0;
+}
+
+uint16_t
+vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_queue *queue;
+ struct rte_vhost_vring *vr;
+ uint16_t avail_idx, free_entries, start_idx;
+ uint16_t desc_indexes[MAX_PKT_BURST];
+ uint16_t used_idx;
+ uint32_t i;
+
+ queue = &dev->queues[queue_id];
+ vr = &queue->vr;
+
+ avail_idx = *((volatile uint16_t *)&vr->avail->idx);
+ start_idx = queue->last_used_idx;
+ free_entries = avail_idx - start_idx;
+ count = RTE_MIN(count, free_entries);
+ count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
+ if (count == 0)
+ return 0;
+
+ /* Retrieve all of the desc indexes first to avoid caching issues. */
+ rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
+ for (i = 0; i < count; i++) {
+ used_idx = (start_idx + i) & (vr->size - 1);
+ desc_indexes[i] = vr->avail->ring[used_idx];
+ vr->used->ring[used_idx].id = desc_indexes[i];
+ vr->used->ring[used_idx].len = pkts[i]->pkt_len +
+ dev->hdr_len;
+ }
+
+ rte_prefetch0(&vr->desc[desc_indexes[0]]);
+ for (i = 0; i < count; i++) {
+ uint16_t desc_idx = desc_indexes[i];
+ int err;
+
+ err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
+ if (unlikely(err)) {
+ used_idx = (start_idx + i) & (vr->size - 1);
+ vr->used->ring[used_idx].len = dev->hdr_len;
+ }
+
+ if (i + 1 < count)
+ rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
+ }
+
+ rte_smp_wmb();
+
+ *(volatile uint16_t *)&vr->used->idx += count;
+ queue->last_used_idx += count;
+
+ /* flush used->idx update before we read avail->flags. */
+ rte_mb();
+
+ /* Kick the guest if necessary. */
+ if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vr->callfd >= 0))
+ eventfd_write(vr->callfd, (eventfd_t)1);
+ return count;
+}
+
+static inline int __attribute__((always_inline))
+dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
+ struct rte_mbuf *m, uint16_t desc_idx,
+ struct rte_mempool *mbuf_pool)
+{
+ struct vring_desc *desc;
+ uint64_t desc_addr;
+ uint32_t desc_avail, desc_offset;
+ uint32_t mbuf_avail, mbuf_offset;
+ uint32_t cpy_len;
+ struct rte_mbuf *cur = m, *prev = m;
+	/* A counter to guard against a looping (malformed) desc chain */
+ uint32_t nr_desc = 1;
+
+ desc = &vr->desc[desc_idx];
+ if (unlikely((desc->len < dev->hdr_len)) ||
+ (desc->flags & VRING_DESC_F_INDIRECT))
+ return -1;
+
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+
+	/*
+	 * We support neither ANY_LAYOUT nor VERSION_1, meaning a Tx
+	 * packet from the guest must have at least 2 desc buffers:
+	 * the first storing the header and the others storing the
+	 * data.
+	 *
+	 * And since we don't support TSO, we can simply skip the
+	 * header.
+	 */
+ desc = &vr->desc[desc->next];
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ nr_desc += 1;
+
+ mbuf_offset = 0;
+ mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+ while (1) {
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset),
+ (void *)((uintptr_t)(desc_addr + desc_offset)),
+ cpy_len);
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+
+		/* This desc has been fully consumed; get the next one */
+ if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0)
+ break;
+
+ if (unlikely(desc->next >= vr->size ||
+ ++nr_desc > vr->size))
+ return -1;
+ desc = &vr->desc[desc->next];
+
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ }
+
+		/*
+		 * This mbuf is full; allocate a new one to hold
+		 * more data.
+		 */
+ if (mbuf_avail == 0) {
+ cur = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(cur == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA, "Failed to "
+ "allocate memory for mbuf.\n");
+ return -1;
+ }
+
+ prev->next = cur;
+ prev->data_len = mbuf_offset;
+ m->nb_segs += 1;
+ m->pkt_len += mbuf_offset;
+ prev = cur;
+
+ mbuf_offset = 0;
+ mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+ }
+ }
+
+ prev->data_len = mbuf_offset;
+ m->pkt_len += mbuf_offset;
+
+ return 0;
+}
+
+uint16_t
+vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+ struct vhost_queue *queue;
+ struct rte_vhost_vring *vr;
+ uint32_t desc_indexes[MAX_PKT_BURST];
+ uint32_t used_idx;
+ uint32_t i = 0;
+ uint16_t free_entries;
+ uint16_t avail_idx;
+
+ queue = &dev->queues[queue_id];
+ vr = &queue->vr;
+
+ free_entries = *((volatile uint16_t *)&vr->avail->idx) -
+ queue->last_avail_idx;
+ if (free_entries == 0)
+ return 0;
+
+ /* Prefetch available and used ring */
+ avail_idx = queue->last_avail_idx & (vr->size - 1);
+ used_idx = queue->last_used_idx & (vr->size - 1);
+ rte_prefetch0(&vr->avail->ring[avail_idx]);
+ rte_prefetch0(&vr->used->ring[used_idx]);
+
+ count = RTE_MIN(count, MAX_PKT_BURST);
+ count = RTE_MIN(count, free_entries);
+
+ /*
+ * Retrieve all of the head indexes first and pre-update used entries
+ * to avoid caching issues.
+ */
+ for (i = 0; i < count; i++) {
+ avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
+ used_idx = (queue->last_used_idx + i) & (vr->size - 1);
+ desc_indexes[i] = vr->avail->ring[avail_idx];
+
+ vr->used->ring[used_idx].id = desc_indexes[i];
+ vr->used->ring[used_idx].len = 0;
+ }
+
+	/* Prefetch the first descriptor. */
+ rte_prefetch0(&vr->desc[desc_indexes[0]]);
+ for (i = 0; i < count; i++) {
+ int err;
+
+ if (likely(i + 1 < count))
+ rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);
+
+ pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(pkts[i] == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to allocate memory for mbuf.\n");
+ break;
+ }
+
+ err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
+ if (unlikely(err)) {
+ rte_pktmbuf_free(pkts[i]);
+ break;
+ }
+
+ }
+ if (!i)
+ return 0;
+
+ queue->last_avail_idx += i;
+ queue->last_used_idx += i;
+ rte_smp_wmb();
+ rte_smp_rmb();
+
+ vr->used->idx += i;
+
+ if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vr->callfd >= 0))
+ eventfd_write(vr->callfd, (eventfd_t)1);
+
+ return i;
+}
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index f4dbaa48..d9ef140f 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -149,7 +149,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
*/
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
@@ -525,7 +525,7 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
break;
}
}
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
return vhost_va;
@@ -555,7 +555,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
uint8_t success = 0;
void *userdata;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
vq = dev->virtqueue_rx;
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/* As many data cores may want access to available buffers, they need to be reserved. */
@@ -580,7 +580,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
@@ -775,7 +775,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->dev->device_fh == dev->device_fh) {
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
"Source and destination MAC addresses are the same. "
"Dropping packet.\n",
dev_ll->dev->device_fh);
@@ -783,12 +783,12 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
}
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
"MAC address is local\n", dev_ll->dev->device_fh);
if (dev_ll->dev->remove) {
/*drop the packet if the device is marked for removal*/
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
"Device is marked for removal\n",
dev_ll->dev->device_fh);
} else {
@@ -829,7 +829,7 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
return;
}
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
"MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
@@ -903,7 +903,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
if (vq->last_used_idx == avail_idx)
return;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
@@ -913,7 +913,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
free_entries = avail_idx - vq->last_used_idx;
free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
@@ -1003,7 +1003,7 @@ switch_worker(__attribute__((unused)) void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"TX queue drained after timeout with burst size %u\n",
tx_q->len);