author     Luca Boccassi <luca.boccassi@gmail.com>   2018-11-01 11:59:50 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>   2018-11-01 12:00:19 +0000
commit     8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)
tree       208e3bc33c220854d89d010e3abf720a2e62e546 /examples
parent     b63264c8342e6a1b6971c79550d2af2024b6a4de (diff)
New upstream version 18.11-rc1 (upstream/18.11-rc1)

Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'examples')
-rw-r--r--  examples/Makefile | 2
-rw-r--r--  examples/bbdev_app/main.c | 1
-rw-r--r--  examples/bond/main.c | 1
-rw-r--r--  examples/cmdline/Makefile | 1
-rw-r--r--  examples/ethtool/ethtool-app/Makefile | 2
-rw-r--r--  examples/eventdev_pipeline/main.c | 88
-rw-r--r--  examples/eventdev_pipeline/pipeline_common.h | 31
-rw-r--r--  examples/eventdev_pipeline/pipeline_worker_generic.c | 268
-rw-r--r--  examples/eventdev_pipeline/pipeline_worker_tx.c | 156
-rw-r--r--  examples/exception_path/main.c | 3
-rw-r--r--  examples/flow_filtering/main.c | 17
-rw-r--r--  examples/ip_fragmentation/main.c | 3
-rw-r--r--  examples/ip_pipeline/Makefile | 1
-rw-r--r--  examples/ip_pipeline/action.c | 67
-rw-r--r--  examples/ip_pipeline/action.h | 1
-rw-r--r--  examples/ip_pipeline/cli.c | 831
-rw-r--r--  examples/ip_pipeline/conn.c | 1
-rw-r--r--  examples/ip_pipeline/cryptodev.c | 117
-rw-r--r--  examples/ip_pipeline/cryptodev.h | 43
-rw-r--r--  examples/ip_pipeline/examples/flow_crypto.cli | 58
-rw-r--r--  examples/ip_pipeline/hash_func.h | 357
-rw-r--r--  examples/ip_pipeline/hash_func_arm64.h | 232
-rw-r--r--  examples/ip_pipeline/link.c | 1
-rw-r--r--  examples/ip_pipeline/main.c | 9
-rw-r--r--  examples/ip_pipeline/meson.build | 3
-rw-r--r--  examples/ip_pipeline/pipeline.c | 79
-rw-r--r--  examples/ip_pipeline/pipeline.h | 16
-rw-r--r--  examples/ip_pipeline/thread.c | 70
-rw-r--r--  examples/ip_reassembly/main.c | 3
-rw-r--r--  examples/ipsec-secgw/esp.c | 3
-rw-r--r--  examples/ipsec-secgw/ipsec-secgw.c | 23
-rw-r--r--  examples/ipsec-secgw/sa.c | 11
-rw-r--r--  examples/ipv4_multicast/main.c | 5
-rw-r--r--  examples/kni/Makefile | 2
-rw-r--r--  examples/kni/main.c | 95
-rw-r--r--  examples/kni/meson.build | 1
-rw-r--r--  examples/l2fwd-cat/Makefile | 2
-rw-r--r--  examples/l2fwd-cat/meson.build | 1
-rw-r--r--  examples/l2fwd-crypto/main.c | 1
-rw-r--r--  examples/l2fwd-jobstats/main.c | 1
-rw-r--r--  examples/l2fwd-keepalive/main.c | 1
-rw-r--r--  examples/l2fwd/main.c | 1
-rw-r--r--  examples/l3fwd-acl/main.c | 3
-rw-r--r--  examples/l3fwd-power/Makefile | 3
-rw-r--r--  examples/l3fwd-power/main.c | 345
-rw-r--r--  examples/l3fwd-power/meson.build | 1
-rw-r--r--  examples/l3fwd-vf/main.c | 3
-rw-r--r--  examples/l3fwd/main.c | 3
-rw-r--r--  examples/link_status_interrupt/main.c | 1
-rw-r--r--  examples/load_balancer/Makefile | 1
-rw-r--r--  examples/load_balancer/init.c | 3
-rw-r--r--  examples/meson.build | 4
-rw-r--r--  examples/multi_process/Makefile | 1
-rw-r--r--  examples/multi_process/hotplug_mp/Makefile | 23
-rw-r--r--  examples/multi_process/hotplug_mp/commands.c | 214
-rw-r--r--  examples/multi_process/hotplug_mp/commands.h | 10
-rw-r--r--  examples/multi_process/hotplug_mp/main.c | 41
-rw-r--r--  examples/multi_process/symmetric_mp/main.c | 3
-rw-r--r--  examples/netmap_compat/bridge/bridge.c | 1
-rw-r--r--  examples/performance-thread/l3fwd-thread/main.c | 5
-rw-r--r--  examples/performance-thread/pthread_shim/main.c | 1
-rw-r--r--  examples/performance-thread/pthread_shim/pthread_shim.c | 1
-rw-r--r--  examples/qos_meter/main.c | 3
-rw-r--r--  examples/qos_sched/Makefile | 2
-rw-r--r--  examples/qos_sched/init.c | 1
-rw-r--r--  examples/quota_watermark/qw/init.c | 1
-rw-r--r--  examples/service_cores/main.c | 6
-rw-r--r--  examples/tep_termination/Makefile | 1
-rw-r--r--  examples/tep_termination/vxlan_setup.c | 1
-rw-r--r--  examples/vdpa/Makefile | 32
-rw-r--r--  examples/vdpa/main.c | 454
-rw-r--r--  examples/vdpa/meson.build | 16
-rw-r--r--  examples/vhost/Makefile | 1
-rw-r--r--  examples/vhost/main.c | 6
-rw-r--r--  examples/vhost_crypto/Makefile | 1
-rw-r--r--  examples/vhost_crypto/main.c | 480
-rw-r--r--  examples/vhost_crypto/meson.build | 2
-rw-r--r--  examples/vhost_scsi/Makefile | 4
-rw-r--r--  examples/vhost_scsi/meson.build | 2
-rw-r--r--  examples/vm_power_manager/Makefile | 6
-rw-r--r--  examples/vm_power_manager/channel_manager.c | 180
-rw-r--r--  examples/vm_power_manager/channel_manager.h | 21
-rw-r--r--  examples/vm_power_manager/channel_monitor.c | 532
-rw-r--r--  examples/vm_power_manager/guest_cli/meson.build | 21
-rw-r--r--  examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 1
-rw-r--r--  examples/vm_power_manager/main.c | 2
-rw-r--r--  examples/vm_power_manager/meson.build | 37
87 files changed, 3687 insertions, 1402 deletions
diff --git a/examples/Makefile b/examples/Makefile
index 481720cb..356fcb1c 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -65,7 +65,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += tep_termination
endif
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += timer
-DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vhost_scsi
+DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vhost_scsi vdpa
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost_crypto
endif
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 045a190b..d68c06ae 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -64,7 +64,6 @@ static const struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
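The DEV_RX_OFFLOAD_CRC_STRIP removals repeated across this patch follow the 18.11 ethdev change that made CRC stripping the default port behaviour, so the examples no longer have to request it. A minimal sketch of an 18.11-style configuration without the flag (the values mirror the struct above and are illustrative only):

#include <rte_ethdev.h>

/* Sketch: CRC stripping no longer needs an rxmode offload bit. */
static const struct rte_eth_conf example_port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_NONE,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

An application that actually needs the CRC left on received frames would instead request DEV_RX_OFFLOAD_KEEP_CRC, subject to what rte_eth_dev_info_get() reports for the port.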
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 23d0981a..b282e68b 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -122,7 +122,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/cmdline/Makefile b/examples/cmdline/Makefile
index 7893c85b..a617cce1 100644
--- a/examples/cmdline/Makefile
+++ b/examples/cmdline/Makefile
@@ -56,7 +56,6 @@ SRCS-y := main.c commands.c parse_obj_list.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-CFLAGS_parse_obj_list.o := -D_GNU_SOURCE
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/ethtool/ethtool-app/Makefile b/examples/ethtool/ethtool-app/Makefile
index 4cd9efdd..9ecfc0b8 100644
--- a/examples/ethtool/ethtool-app/Makefile
+++ b/examples/ethtool/ethtool-app/Makefile
@@ -16,7 +16,7 @@ APP = ethtool
# all source are stored in SRCS-y
SRCS-y := main.c ethapp.c
-CFLAGS += -O3 -D_GNU_SOURCE -pthread -I$(SRCDIR)/../lib
+CFLAGS += -O3 -pthread -I$(SRCDIR)/../lib
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -L$(subst ethtool-app,lib,$(RTE_OUTPUT))/lib
diff --git a/examples/eventdev_pipeline/main.c b/examples/eventdev_pipeline/main.c
index 700bc696..92e08bc0 100644
--- a/examples/eventdev_pipeline/main.c
+++ b/examples/eventdev_pipeline/main.c
@@ -26,20 +26,6 @@ core_in_use(unsigned int lcore_id) {
fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}
-static void
-eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
- void *userdata)
-{
- int port_id = (uintptr_t) userdata;
- unsigned int _sent = 0;
-
- do {
- /* Note: hard-coded TX queue */
- _sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
- unsent - _sent);
- } while (_sent != unsent);
-}
-
/*
* Parse the coremask given as argument (hexadecimal string) and fill
* the global configuration (core role and core count) with the parsed
@@ -263,6 +249,7 @@ parse_app_args(int argc, char **argv)
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
+ struct rte_eth_rxconf rx_conf;
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
@@ -291,6 +278,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ rx_conf = dev_info.default_rxconf;
+ rx_conf.offloads = port_conf.rxmode.offloads;
port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
@@ -311,7 +300,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
/* Allocate and set up 1 RX queue per Ethernet port. */
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
- rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ rte_eth_dev_socket_id(port), &rx_conf,
+ mbuf_pool);
if (retval < 0)
return retval;
}
@@ -350,7 +340,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static int
init_ports(uint16_t num_ports)
{
- uint16_t portid, i;
+ uint16_t portid;
if (!cdata.num_mbuf)
cdata.num_mbuf = 16384 * num_ports;
@@ -367,36 +357,26 @@ init_ports(uint16_t num_ports)
rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
- RTE_ETH_FOREACH_DEV(i) {
- void *userdata = (void *)(uintptr_t) i;
- fdata->tx_buf[i] =
- rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
- if (fdata->tx_buf[i] == NULL)
- rte_panic("Out of memory\n");
- rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
- rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
- eth_tx_buffer_retry,
- userdata);
- }
-
return 0;
}
static void
do_capability_setup(uint8_t eventdev_id)
{
+ int ret;
uint16_t i;
- uint8_t mt_unsafe = 0;
+ uint8_t generic_pipeline = 0;
uint8_t burst = 0;
RTE_ETH_FOREACH_DEV(i) {
- struct rte_eth_dev_info dev_info;
- memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
-
- rte_eth_dev_info_get(i, &dev_info);
- /* Check if it is safe ask worker to tx. */
- mt_unsafe |= !(dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_MT_LOCKFREE);
+ uint32_t caps = 0;
+
+ ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Invalid capability for Tx adptr port %d\n", i);
+ generic_pipeline |= !(caps &
+ RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
}
struct rte_event_dev_info eventdev_info;
@@ -406,21 +386,42 @@ do_capability_setup(uint8_t eventdev_id)
burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
0;
- if (mt_unsafe)
+ if (generic_pipeline)
set_worker_generic_setup_data(&fdata->cap, burst);
else
- set_worker_tx_setup_data(&fdata->cap, burst);
+ set_worker_tx_enq_setup_data(&fdata->cap, burst);
}
static void
signal_handler(int signum)
{
+ static uint8_t once;
+ uint16_t portid;
+
if (fdata->done)
rte_exit(1, "Exiting on signal %d\n", signum);
- if (signum == SIGINT || signum == SIGTERM) {
+ if ((signum == SIGINT || signum == SIGTERM) && !once) {
printf("\n\nSignal %d received, preparing to exit...\n",
signum);
+ if (cdata.dump_dev)
+ rte_event_dev_dump(0, stdout);
+ once = 1;
fdata->done = 1;
+ rte_smp_wmb();
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ rte_event_eth_rx_adapter_stop(portid);
+ rte_event_eth_tx_adapter_stop(portid);
+ rte_eth_dev_stop(portid);
+ }
+
+ rte_eal_mp_wait_lcore();
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ rte_eth_dev_close(portid);
+ }
+
+ rte_event_dev_close(0);
}
if (signum == SIGTSTP)
rte_event_dev_dump(0, stdout);
@@ -499,7 +500,7 @@ main(int argc, char **argv)
if (worker_data == NULL)
rte_panic("rte_calloc failed\n");
- int dev_id = fdata->cap.evdev_setup(&cons_data, worker_data);
+ int dev_id = fdata->cap.evdev_setup(worker_data);
if (dev_id < 0)
rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
@@ -524,8 +525,8 @@ main(int argc, char **argv)
if (fdata->tx_core[lcore_id])
printf(
- "[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
- __func__, lcore_id, cons_data.port_id);
+ "[%s()] lcore %d executing NIC Tx\n",
+ __func__, lcore_id);
if (fdata->sched_core[lcore_id])
printf("[%s()] lcore %d executing scheduler\n",
@@ -555,9 +556,6 @@ main(int argc, char **argv)
rte_eal_mp_wait_lcore();
- if (cdata.dump_dev)
- rte_event_dev_dump(dev_id, stdout);
-
if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
(uint64_t)-ENOTSUP)) {
printf("\nPort Workload distribution:\n");
diff --git a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
index 9703396f..a6cc912f 100644
--- a/examples/eventdev_pipeline/pipeline_common.h
+++ b/examples/eventdev_pipeline/pipeline_common.h
@@ -16,6 +16,7 @@
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
#include <rte_service.h>
#include <rte_service_component.h>
@@ -23,38 +24,30 @@
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64
-struct cons_data {
- uint8_t dev_id;
- uint8_t port_id;
- uint8_t release;
-} __rte_cache_aligned;
-
struct worker_data {
uint8_t dev_id;
uint8_t port_id;
} __rte_cache_aligned;
typedef int (*worker_loop)(void *);
-typedef int (*consumer_loop)(void);
typedef void (*schedule_loop)(unsigned int);
-typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);
-typedef void (*rx_adapter_setup)(uint16_t nb_ports);
+typedef int (*eventdev_setup)(struct worker_data *);
+typedef void (*adapter_setup)(uint16_t nb_ports);
typedef void (*opt_check)(void);
struct setup_data {
worker_loop worker;
- consumer_loop consumer;
schedule_loop scheduler;
eventdev_setup evdev_setup;
- rx_adapter_setup adptr_setup;
+ adapter_setup adptr_setup;
opt_check check_opt;
};
struct fastpath_data {
volatile int done;
- uint32_t tx_lock;
uint32_t evdev_service_id;
uint32_t rxadptr_service_id;
+ uint32_t txadptr_service_id;
bool rx_single;
bool tx_single;
bool sched_single;
@@ -62,7 +55,6 @@ struct fastpath_data {
unsigned int tx_core[MAX_NUM_CORE];
unsigned int sched_core[MAX_NUM_CORE];
unsigned int worker_core[MAX_NUM_CORE];
- struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
struct setup_data cap;
} __rte_cache_aligned;
@@ -88,6 +80,8 @@ struct config_data {
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
uint8_t rx_adapter_id;
+ uint8_t tx_adapter_id;
+ uint8_t tx_queue_id;
uint64_t worker_lcore_mask;
uint64_t rx_lcore_mask;
uint64_t tx_lcore_mask;
@@ -99,8 +93,6 @@ struct port_link {
uint8_t priority;
};
-struct cons_data cons_data;
-
struct fastpath_data *fdata;
struct config_data cdata;
@@ -142,12 +134,11 @@ schedule_devices(unsigned int lcore_id)
}
}
- if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
- rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
- fdata->cap.consumer();
- rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
+ if (fdata->tx_core[lcore_id]) {
+ rte_service_run_iter_on_app_lcore(fdata->txadptr_service_id,
+ !fdata->tx_single);
}
}
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
-void set_worker_tx_setup_data(struct setup_data *caps, bool burst);
+void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);
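With the consumer loop gone, transmission is just another adapter service that schedule_devices() drives, which is why the hand-rolled tx_lock disappears from fastpath_data. A minimal sketch of the call it now boils down to (parameter names are placeholders):

#include <stdbool.h>
#include <stdint.h>
#include <rte_service.h>

/* Run one iteration of the Tx adapter service on the current lcore. The
 * serialize flag makes the call safe when several lcores map to the same
 * service, which is why the example passes !fdata->tx_single. */
static inline void
tx_adapter_iteration(uint32_t txadptr_service_id, bool shared_across_lcores)
{
	rte_service_run_iter_on_app_lcore(txadptr_service_id,
			shared_across_lcores);
}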
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index 2215e9eb..16906494 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -119,153 +119,13 @@ worker_generic_burst(void *arg)
return 0;
}
-static __rte_always_inline int
-consumer(void)
-{
- const uint64_t freq_khz = rte_get_timer_hz() / 1000;
- struct rte_event packet;
-
- static uint64_t received;
- static uint64_t last_pkts;
- static uint64_t last_time;
- static uint64_t start_time;
- int i;
- uint8_t dev_id = cons_data.dev_id;
- uint8_t port_id = cons_data.port_id;
-
- do {
- uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
- &packet, 1, 0);
-
- if (n == 0) {
- RTE_ETH_FOREACH_DEV(i)
- rte_eth_tx_buffer_flush(i, 0, fdata->tx_buf[i]);
- return 0;
- }
- if (start_time == 0)
- last_time = start_time = rte_get_timer_cycles();
-
- received++;
- uint8_t outport = packet.mbuf->port;
-
- exchange_mac(packet.mbuf);
- rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
- packet.mbuf);
-
- if (cons_data.release)
- rte_event_enqueue_burst(dev_id, port_id,
- &packet, n);
-
- /* Print out mpps every 1<22 packets */
- if (!cdata.quiet && received >= last_pkts + (1<<22)) {
- const uint64_t now = rte_get_timer_cycles();
- const uint64_t total_ms = (now - start_time) / freq_khz;
- const uint64_t delta_ms = (now - last_time) / freq_khz;
- uint64_t delta_pkts = received - last_pkts;
-
- printf("# %s RX=%"PRIu64", time %"PRIu64 "ms, "
- "avg %.3f mpps [current %.3f mpps]\n",
- __func__,
- received,
- total_ms,
- received / (total_ms * 1000.0),
- delta_pkts / (delta_ms * 1000.0));
- last_pkts = received;
- last_time = now;
- }
-
- cdata.num_packets--;
- if (cdata.num_packets <= 0)
- fdata->done = 1;
- /* Be stuck in this loop if single. */
- } while (!fdata->done && fdata->tx_single);
-
- return 0;
-}
-
-static __rte_always_inline int
-consumer_burst(void)
-{
- const uint64_t freq_khz = rte_get_timer_hz() / 1000;
- struct rte_event packets[BATCH_SIZE];
-
- static uint64_t received;
- static uint64_t last_pkts;
- static uint64_t last_time;
- static uint64_t start_time;
- unsigned int i, j;
- uint8_t dev_id = cons_data.dev_id;
- uint8_t port_id = cons_data.port_id;
-
- do {
- uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
- packets, RTE_DIM(packets), 0);
-
- if (n == 0) {
- RTE_ETH_FOREACH_DEV(j)
- rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
- return 0;
- }
- if (start_time == 0)
- last_time = start_time = rte_get_timer_cycles();
-
- received += n;
- for (i = 0; i < n; i++) {
- uint8_t outport = packets[i].mbuf->port;
-
- exchange_mac(packets[i].mbuf);
- rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
- packets[i].mbuf);
-
- packets[i].op = RTE_EVENT_OP_RELEASE;
- }
-
- if (cons_data.release) {
- uint16_t nb_tx;
-
- nb_tx = rte_event_enqueue_burst(dev_id, port_id,
- packets, n);
- while (nb_tx < n)
- nb_tx += rte_event_enqueue_burst(dev_id,
- port_id, packets + nb_tx,
- n - nb_tx);
- }
-
- /* Print out mpps every 1<22 packets */
- if (!cdata.quiet && received >= last_pkts + (1<<22)) {
- const uint64_t now = rte_get_timer_cycles();
- const uint64_t total_ms = (now - start_time) / freq_khz;
- const uint64_t delta_ms = (now - last_time) / freq_khz;
- uint64_t delta_pkts = received - last_pkts;
-
- printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
- "avg %.3f mpps [current %.3f mpps]\n",
- received,
- total_ms,
- received / (total_ms * 1000.0),
- delta_pkts / (delta_ms * 1000.0));
- last_pkts = received;
- last_time = now;
- }
-
- cdata.num_packets -= n;
- if (cdata.num_packets <= 0)
- fdata->done = 1;
- /* Be stuck in this loop if single. */
- } while (!fdata->done && fdata->tx_single);
-
- return 0;
-}
-
static int
-setup_eventdev_generic(struct cons_data *cons_data,
- struct worker_data *worker_data)
+setup_eventdev_generic(struct worker_data *worker_data)
{
const uint8_t dev_id = 0;
/* +1 stages is for a SINGLE_LINK TX stage */
const uint8_t nb_queues = cdata.num_stages + 1;
- /* + 1 is one port for consumer */
- const uint8_t nb_ports = cdata.num_workers + 1;
+ const uint8_t nb_ports = cdata.num_workers;
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
@@ -285,11 +145,6 @@ setup_eventdev_generic(struct cons_data *cons_data,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
};
- struct rte_event_port_conf tx_p_conf = {
- .dequeue_depth = 128,
- .enqueue_depth = 128,
- .new_event_threshold = 4096,
- };
struct rte_event_queue_conf tx_q_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
@@ -297,7 +152,6 @@ setup_eventdev_generic(struct cons_data *cons_data,
struct port_link worker_queues[MAX_NUM_STAGES];
uint8_t disable_implicit_release;
- struct port_link tx_queue;
unsigned int i;
int ret, ndev = rte_event_dev_count();
@@ -314,7 +168,6 @@ setup_eventdev_generic(struct cons_data *cons_data,
RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
wkr_p_conf.disable_implicit_release = disable_implicit_release;
- tx_p_conf.disable_implicit_release = disable_implicit_release;
if (dev_info.max_event_port_dequeue_depth <
config.nb_event_port_dequeue_depth)
@@ -372,8 +225,7 @@ setup_eventdev_generic(struct cons_data *cons_data,
printf("%d: error creating qid %d\n", __LINE__, i);
return -1;
}
- tx_queue.queue_id = i;
- tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+ cdata.tx_queue_id = i;
if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
@@ -403,26 +255,6 @@ setup_eventdev_generic(struct cons_data *cons_data,
w->port_id = i;
}
- if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
- tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
- if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
- tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
- /* port for consumer, linked to TX queue */
- if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
- printf("Error setting up port %d\n", i);
- return -1;
- }
- if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
- &tx_queue.priority, 1) != 1) {
- printf("%d: error creating link for port %d\n",
- __LINE__, i);
- return -1;
- }
- *cons_data = (struct cons_data){.dev_id = dev_id,
- .port_id = i,
- .release = disable_implicit_release };
-
ret = rte_event_dev_service_id_get(dev_id,
&fdata->evdev_service_id);
if (ret != -ESRCH && ret != 0) {
@@ -431,76 +263,107 @@ setup_eventdev_generic(struct cons_data *cons_data,
}
rte_service_runstate_set(fdata->evdev_service_id, 1);
rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
- if (rte_event_dev_start(dev_id) < 0) {
- printf("Error starting eventdev\n");
- return -1;
- }
return dev_id;
}
static void
-init_rx_adapter(uint16_t nb_ports)
+init_adapters(uint16_t nb_ports)
{
int i;
int ret;
+ uint8_t tx_port_id = 0;
uint8_t evdev_id = 0;
struct rte_event_dev_info dev_info;
ret = rte_event_dev_info_get(evdev_id, &dev_info);
- struct rte_event_port_conf rx_p_conf = {
- .dequeue_depth = 8,
- .enqueue_depth = 8,
- .new_event_threshold = 1200,
+ struct rte_event_port_conf adptr_p_conf = {
+ .dequeue_depth = cdata.worker_cq_depth,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
};
- if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
- rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
- if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
- rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+ if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ adptr_p_conf.dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ adptr_p_conf.enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
/* Create one adapter for all the ethernet ports. */
ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
- &rx_p_conf);
+ &adptr_p_conf);
if (ret)
rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
cdata.rx_adapter_id);
+ ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
+ &adptr_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
+ cdata.tx_adapter_id);
+
struct rte_event_eth_rx_adapter_queue_conf queue_conf;
memset(&queue_conf, 0, sizeof(queue_conf));
queue_conf.ev.sched_type = cdata.queue_type;
queue_conf.ev.queue_id = cdata.qid[0];
for (i = 0; i < nb_ports; i++) {
- uint32_t cap;
-
- ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
- if (ret)
- rte_exit(EXIT_FAILURE,
- "failed to get event rx adapter "
- "capabilities");
-
ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
-1, &queue_conf);
if (ret)
rte_exit(EXIT_FAILURE,
"Failed to add queues to Rx adapter");
+
+ ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
+ -1);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Tx adapter");
}
+ ret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,
+ &tx_port_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get Tx adapter port id");
+ ret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,
+ NULL, 1);
+ if (ret != 1)
+ rte_exit(EXIT_FAILURE,
+ "Unable to link Tx adapter port to Tx queue");
+
ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
&fdata->rxadptr_service_id);
if (ret != -ESRCH && ret != 0) {
rte_exit(EXIT_FAILURE,
- "Error getting the service ID for sw eventdev\n");
+ "Error getting the service ID for Rx adapter\n");
}
rte_service_runstate_set(fdata->rxadptr_service_id, 1);
rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
+ ret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,
+ &fdata->txadptr_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Error getting the service ID for Tx adapter\n");
+ }
+ rte_service_runstate_set(fdata->txadptr_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);
+
ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
if (ret)
rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
cdata.rx_adapter_id);
+
+ ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
+ cdata.tx_adapter_id);
+
+ if (rte_event_dev_start(evdev_id) < 0)
+ rte_exit(EXIT_FAILURE, "Error starting eventdev");
}
static void
@@ -510,6 +373,7 @@ generic_opt_check(void)
int ret;
uint32_t cap = 0;
uint8_t rx_needed = 0;
+ uint8_t sched_needed = 0;
struct rte_event_dev_info eventdev_info;
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
@@ -519,6 +383,8 @@ generic_opt_check(void)
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
rte_exit(EXIT_FAILURE,
"Event dev doesn't support all type queues\n");
+ sched_needed = !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);
RTE_ETH_FOREACH_DEV(i) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
@@ -531,9 +397,8 @@ generic_opt_check(void)
if (cdata.worker_lcore_mask == 0 ||
(rx_needed && cdata.rx_lcore_mask == 0) ||
- cdata.tx_lcore_mask == 0 || (cdata.sched_lcore_mask == 0
- && !(eventdev_info.event_dev_cap &
- RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+ (cdata.tx_lcore_mask == 0) ||
+ (sched_needed && cdata.sched_lcore_mask == 0)) {
printf("Core part of pipeline was not assigned any cores. "
"This will stall the pipeline, please check core masks "
"(use -h for details on setting core masks):\n"
@@ -545,23 +410,24 @@ generic_opt_check(void)
rte_exit(-1, "Fix core masks\n");
}
- if (eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
+ if (!sched_needed)
memset(fdata->sched_core, 0,
sizeof(unsigned int) * MAX_NUM_CORE);
+ if (!rx_needed)
+ memset(fdata->rx_core, 0,
+ sizeof(unsigned int) * MAX_NUM_CORE);
}
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
if (burst) {
- caps->consumer = consumer_burst;
caps->worker = worker_generic_burst;
} else {
- caps->consumer = consumer;
caps->worker = worker_generic;
}
- caps->adptr_setup = init_rx_adapter;
+ caps->adptr_setup = init_adapters;
caps->scheduler = schedule_devices;
caps->evdev_setup = setup_eventdev_generic;
caps->check_opt = generic_opt_check;
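init_adapters() above now owns both the Rx and the Tx adapter. A condensed sketch of the Tx-adapter bring-up it performs, with error handling trimmed and every id passed in as a placeholder:

#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>

static int
tx_adapter_bringup(uint8_t tx_adapter_id, uint8_t evdev_id,
		uint8_t tx_queue_id, uint16_t nb_eth_ports,
		struct rte_event_port_conf *pconf)
{
	uint8_t tx_port_id = 0;
	uint16_t i;

	if (rte_event_eth_tx_adapter_create(tx_adapter_id, evdev_id, pconf))
		return -1;

	/* queue id -1 adds every Tx queue of the Ethernet port. */
	for (i = 0; i < nb_eth_ports; i++)
		if (rte_event_eth_tx_adapter_queue_add(tx_adapter_id, i, -1))
			return -1;

	/* The adapter owns an event port; link it to the SINGLE_LINK queue
	 * that the last worker stage enqueues to. */
	if (rte_event_eth_tx_adapter_event_port_get(tx_adapter_id,
			&tx_port_id))
		return -1;
	if (rte_event_port_link(evdev_id, tx_port_id, &tx_queue_id,
			NULL, 1) != 1)
		return -1;

	return rte_event_eth_tx_adapter_start(tx_adapter_id);
}

Note also that rte_event_dev_start() moves from setup_eventdev_generic() to the end of init_adapters(), which lets the adapters set up their event ports before the device starts.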
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 3dbde92d..85eb075f 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -36,10 +36,11 @@ worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
}
static __rte_always_inline void
-worker_tx_pkt(struct rte_mbuf *mbuf)
+worker_tx_pkt(const uint8_t dev, const uint8_t port, struct rte_event *ev)
{
- exchange_mac(mbuf);
- while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
+ exchange_mac(ev->mbuf);
+ rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
+ while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1))
rte_pause();
}
@@ -64,15 +65,15 @@ worker_do_tx_single(void *arg)
received++;
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev.mbuf);
+ worker_tx_pkt(dev, port, &ev);
tx++;
- continue;
+ } else {
+ work();
+ ev.queue_id++;
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
}
- work();
- ev.queue_id++;
- worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- worker_event_enqueue(dev, port, &ev);
- fwd++;
}
if (!cdata.quiet)
@@ -100,14 +101,14 @@ worker_do_tx_single_atq(void *arg)
received++;
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev.mbuf);
+ worker_tx_pkt(dev, port, &ev);
tx++;
- continue;
+ } else {
+ work();
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
}
- work();
- worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- worker_event_enqueue(dev, port, &ev);
- fwd++;
}
if (!cdata.quiet)
@@ -141,7 +142,7 @@ worker_do_tx_single_burst(void *arg)
rte_prefetch0(ev[i + 1].mbuf);
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev[i].mbuf);
+ worker_tx_pkt(dev, port, &ev[i]);
ev[i].op = RTE_EVENT_OP_RELEASE;
tx++;
@@ -188,7 +189,7 @@ worker_do_tx_single_burst_atq(void *arg)
rte_prefetch0(ev[i + 1].mbuf);
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev[i].mbuf);
+ worker_tx_pkt(dev, port, &ev[i]);
ev[i].op = RTE_EVENT_OP_RELEASE;
tx++;
} else
@@ -232,7 +233,7 @@ worker_do_tx(void *arg)
if (cq_id >= lst_qid) {
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev.mbuf);
+ worker_tx_pkt(dev, port, &ev);
tx++;
continue;
}
@@ -280,7 +281,7 @@ worker_do_tx_atq(void *arg)
if (cq_id == lst_qid) {
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev.mbuf);
+ worker_tx_pkt(dev, port, &ev);
tx++;
continue;
}
@@ -330,7 +331,7 @@ worker_do_tx_burst(void *arg)
if (cq_id >= lst_qid) {
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev[i].mbuf);
+ worker_tx_pkt(dev, port, &ev[i]);
tx++;
ev[i].op = RTE_EVENT_OP_RELEASE;
continue;
@@ -387,7 +388,7 @@ worker_do_tx_burst_atq(void *arg)
if (cq_id == lst_qid) {
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
- worker_tx_pkt(ev[i].mbuf);
+ worker_tx_pkt(dev, port, &ev[i]);
tx++;
ev[i].op = RTE_EVENT_OP_RELEASE;
continue;
@@ -413,10 +414,8 @@ worker_do_tx_burst_atq(void *arg)
}
static int
-setup_eventdev_worker_tx(struct cons_data *cons_data,
- struct worker_data *worker_data)
+setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
{
- RTE_SET_USED(cons_data);
uint8_t i;
const uint8_t atq = cdata.all_type_queues ? 1 : 0;
const uint8_t dev_id = 0;
@@ -575,10 +574,9 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,
}
rte_service_runstate_set(fdata->evdev_service_id, 1);
rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
- if (rte_event_dev_start(dev_id) < 0) {
- printf("Error starting eventdev\n");
- return -1;
- }
+
+ if (rte_event_dev_start(dev_id) < 0)
+ rte_exit(EXIT_FAILURE, "Error starting eventdev");
return dev_id;
}
@@ -602,7 +600,7 @@ service_rx_adapter(void *arg)
}
static void
-init_rx_adapter(uint16_t nb_ports)
+init_adapters(uint16_t nb_ports)
{
int i;
int ret;
@@ -613,17 +611,18 @@ init_rx_adapter(uint16_t nb_ports)
ret = rte_event_dev_info_get(evdev_id, &dev_info);
adptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);
- struct rte_event_port_conf rx_p_conf = {
- .dequeue_depth = 8,
- .enqueue_depth = 8,
- .new_event_threshold = 1200,
+ struct rte_event_port_conf adptr_p_conf = {
+ .dequeue_depth = cdata.worker_cq_depth,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
};
- if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
- rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
- if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
- rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
-
+ if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ adptr_p_conf.dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ adptr_p_conf.enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
struct rte_event_eth_rx_adapter_queue_conf queue_conf;
memset(&queue_conf, 0, sizeof(queue_conf));
@@ -633,11 +632,11 @@ init_rx_adapter(uint16_t nb_ports)
uint32_t cap;
uint32_t service_id;
- ret = rte_event_eth_rx_adapter_create(i, evdev_id, &rx_p_conf);
+ ret = rte_event_eth_rx_adapter_create(i, evdev_id,
+ &adptr_p_conf);
if (ret)
rte_exit(EXIT_FAILURE,
- "failed to create rx adapter[%d]",
- cdata.rx_adapter_id);
+ "failed to create rx adapter[%d]", i);
ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
if (ret)
@@ -654,7 +653,6 @@ init_rx_adapter(uint16_t nb_ports)
rte_exit(EXIT_FAILURE,
"Failed to add queues to Rx adapter");
-
/* Producer needs to be scheduled. */
if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
ret = rte_event_eth_rx_adapter_service_id_get(i,
@@ -680,9 +678,29 @@ init_rx_adapter(uint16_t nb_ports)
ret = rte_event_eth_rx_adapter_start(i);
if (ret)
rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
- cdata.rx_adapter_id);
+ i);
}
+ /* We already know that Tx adapter has INTERNAL port cap*/
+ ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
+ &adptr_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
+ cdata.tx_adapter_id);
+
+ for (i = 0; i < nb_ports; i++) {
+ ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
+ -1);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Tx adapter");
+ }
+
+ ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
+ cdata.tx_adapter_id);
+
if (adptr_services->nb_rx_adptrs) {
struct rte_service_spec service;
@@ -695,8 +713,7 @@ init_rx_adapter(uint16_t nb_ports)
&fdata->rxadptr_service_id);
if (ret)
rte_exit(EXIT_FAILURE,
- "Rx adapter[%d] service register failed",
- cdata.rx_adapter_id);
+ "Rx adapter service register failed");
rte_service_runstate_set(fdata->rxadptr_service_id, 1);
rte_service_component_runstate_set(fdata->rxadptr_service_id,
@@ -708,23 +725,19 @@ init_rx_adapter(uint16_t nb_ports)
rte_free(adptr_services);
}
- if (!adptr_services->nb_rx_adptrs && fdata->cap.consumer == NULL &&
- (dev_info.event_dev_cap &
+ if (!adptr_services->nb_rx_adptrs && (dev_info.event_dev_cap &
RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))
fdata->cap.scheduler = NULL;
-
- if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
- memset(fdata->sched_core, 0,
- sizeof(unsigned int) * MAX_NUM_CORE);
}
static void
-worker_tx_opt_check(void)
+worker_tx_enq_opt_check(void)
{
int i;
int ret;
uint32_t cap = 0;
uint8_t rx_needed = 0;
+ uint8_t sched_needed = 0;
struct rte_event_dev_info eventdev_info;
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
@@ -734,32 +747,38 @@ worker_tx_opt_check(void)
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
rte_exit(EXIT_FAILURE,
"Event dev doesn't support all type queues\n");
+ sched_needed = !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);
RTE_ETH_FOREACH_DEV(i) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
rte_exit(EXIT_FAILURE,
- "failed to get event rx adapter "
- "capabilities");
+ "failed to get event rx adapter capabilities");
rx_needed |=
!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
}
if (cdata.worker_lcore_mask == 0 ||
(rx_needed && cdata.rx_lcore_mask == 0) ||
- (cdata.sched_lcore_mask == 0 &&
- !(eventdev_info.event_dev_cap &
- RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+ (sched_needed && cdata.sched_lcore_mask == 0)) {
printf("Core part of pipeline was not assigned any cores. "
"This will stall the pipeline, please check core masks "
"(use -h for details on setting core masks):\n"
- "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
- "\n\tworkers: %"PRIu64"\n",
- cdata.rx_lcore_mask, cdata.tx_lcore_mask,
- cdata.sched_lcore_mask,
- cdata.worker_lcore_mask);
+ "\trx: %"PRIu64"\n\tsched: %"PRIu64
+ "\n\tworkers: %"PRIu64"\n", cdata.rx_lcore_mask,
+ cdata.sched_lcore_mask, cdata.worker_lcore_mask);
rte_exit(-1, "Fix core masks\n");
}
+
+ if (!sched_needed)
+ memset(fdata->sched_core, 0,
+ sizeof(unsigned int) * MAX_NUM_CORE);
+ if (!rx_needed)
+ memset(fdata->rx_core, 0,
+ sizeof(unsigned int) * MAX_NUM_CORE);
+
+ memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
}
static worker_loop
@@ -821,18 +840,15 @@ get_worker_multi_stage(bool burst)
}
void
-set_worker_tx_setup_data(struct setup_data *caps, bool burst)
+set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst)
{
if (cdata.num_stages == 1)
caps->worker = get_worker_single_stage(burst);
else
caps->worker = get_worker_multi_stage(burst);
- memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
-
- caps->check_opt = worker_tx_opt_check;
- caps->consumer = NULL;
+ caps->check_opt = worker_tx_enq_opt_check;
caps->scheduler = schedule_devices;
- caps->evdev_setup = setup_eventdev_worker_tx;
- caps->adptr_setup = init_rx_adapter;
+ caps->evdev_setup = setup_eventdev_worker_tx_enq;
+ caps->adptr_setup = init_adapters;
}
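worker_tx_pkt() now hands finished packets to the Tx adapter instead of calling rte_eth_tx_burst() directly. A minimal sketch of that per-event path (the queue index 0 mirrors the example's single-Tx-queue assumption):

#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_pause.h>

static inline void
tx_one_event(uint8_t evdev_id, uint8_t event_port, struct rte_event *ev)
{
	/* Record the destination Tx queue in the mbuf, then retry the
	 * adapter enqueue until the single event is accepted. */
	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
	while (rte_event_eth_tx_adapter_enqueue(evdev_id, event_port,
			ev, 1) != 1)
		rte_pause();
}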
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index 440422bc..4180a868 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -87,9 +87,6 @@
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
- .rxmode = {
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- },
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index ce91e8a6..a73d120e 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -121,7 +121,6 @@ init_port(void)
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.offloads =
@@ -132,22 +131,6 @@ init_port(void)
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO,
},
- /*
- * Initialize fdir_conf of rte_eth_conf.
- * Fdir is used in flow filtering for I40e,
- * so rte_flow rules involve some fdir
- * configurations. In long term it's better
- * that drivers don't require any fdir
- * configuration for rte_flow, but we need to
- * get this workaround so that sample app can
- * run on I40e.
- */
- .fdir_conf = {
- .mode = RTE_FDIR_MODE_PERFECT,
- .pballoc = RTE_FDIR_PBALLOC_64K,
- .status = RTE_FDIR_REPORT_STATUS,
- .drop_queue = 127,
- },
};
struct rte_eth_txconf txq_conf;
struct rte_eth_rxconf rxq_conf;
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 5306d767..17a877da 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -141,8 +141,7 @@ static struct rte_eth_conf port_conf = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ DEV_RX_OFFLOAD_JUMBO_FRAME),
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 3fb98ce3..41ba7df2 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -18,6 +18,7 @@ SRCS-y += swq.c
SRCS-y += tap.c
SRCS-y += thread.c
SRCS-y += tmgr.c
+SRCS-y += cryptodev.c
# Build using pkg-config variables if possible
$(shell pkg-config --exists libdpdk)
diff --git a/examples/ip_pipeline/action.c b/examples/ip_pipeline/action.c
index a29c2b36..d2104aad 100644
--- a/examples/ip_pipeline/action.c
+++ b/examples/ip_pipeline/action.c
@@ -7,9 +7,9 @@
#include <string.h>
#include <rte_string_fns.h>
+#include <rte_table_hash_func.h>
#include "action.h"
-#include "hash_func.h"
/**
* Input port
@@ -57,35 +57,35 @@ port_in_action_profile_create(const char *name,
(params->lb.f_hash == NULL)) {
switch (params->lb.key_size) {
case 8:
- params->lb.f_hash = hash_default_key8;
+ params->lb.f_hash = rte_table_hash_crc_key8;
break;
case 16:
- params->lb.f_hash = hash_default_key16;
+ params->lb.f_hash = rte_table_hash_crc_key16;
break;
case 24:
- params->lb.f_hash = hash_default_key24;
+ params->lb.f_hash = rte_table_hash_crc_key24;
break;
case 32:
- params->lb.f_hash = hash_default_key32;
+ params->lb.f_hash = rte_table_hash_crc_key32;
break;
case 40:
- params->lb.f_hash = hash_default_key40;
+ params->lb.f_hash = rte_table_hash_crc_key40;
break;
case 48:
- params->lb.f_hash = hash_default_key48;
+ params->lb.f_hash = rte_table_hash_crc_key48;
break;
case 56:
- params->lb.f_hash = hash_default_key56;
+ params->lb.f_hash = rte_table_hash_crc_key56;
break;
case 64:
- params->lb.f_hash = hash_default_key64;
+ params->lb.f_hash = rte_table_hash_crc_key64;
break;
default:
@@ -192,35 +192,35 @@ table_action_profile_create(const char *name,
(params->lb.f_hash == NULL)) {
switch (params->lb.key_size) {
case 8:
- params->lb.f_hash = hash_default_key8;
+ params->lb.f_hash = rte_table_hash_crc_key8;
break;
case 16:
- params->lb.f_hash = hash_default_key16;
+ params->lb.f_hash = rte_table_hash_crc_key16;
break;
case 24:
- params->lb.f_hash = hash_default_key24;
+ params->lb.f_hash = rte_table_hash_crc_key24;
break;
case 32:
- params->lb.f_hash = hash_default_key32;
+ params->lb.f_hash = rte_table_hash_crc_key32;
break;
case 40:
- params->lb.f_hash = hash_default_key40;
+ params->lb.f_hash = rte_table_hash_crc_key40;
break;
case 48:
- params->lb.f_hash = hash_default_key48;
+ params->lb.f_hash = rte_table_hash_crc_key48;
break;
case 56:
- params->lb.f_hash = hash_default_key56;
+ params->lb.f_hash = rte_table_hash_crc_key56;
break;
case 64:
- params->lb.f_hash = hash_default_key64;
+ params->lb.f_hash = rte_table_hash_crc_key64;
break;
default:
@@ -333,6 +333,39 @@ table_action_profile_create(const char *name,
}
}
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_SYM_CRYPTO,
+ &params->sym_crypto);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TAG,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_DECAP,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
status = rte_table_action_profile_freeze(ap);
if (status) {
rte_table_action_profile_free(ap);
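The switch statements above replace the application-local hash_default_key* helpers from hash_func.h with the CRC hash functions that librte_table now exports. A small sketch of how one of them plugs into the librte_table hash API (the 16-byte key size and helper name are illustrative):

#include <rte_table_hash.h>
#include <rte_table_hash_func.h>

/* rte_table_hash_crc_key16() already matches the rte_table_hash_op_hash
 * signature, so it can be assigned directly as the hash callback. */
static void
pick_crc_hash(struct rte_table_hash_params *params)
{
	params->key_size = 16;
	params->f_hash = rte_table_hash_crc_key16;
	params->seed = 0;
}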
diff --git a/examples/ip_pipeline/action.h b/examples/ip_pipeline/action.h
index 417200e8..cde17e69 100644
--- a/examples/ip_pipeline/action.h
+++ b/examples/ip_pipeline/action.h
@@ -53,6 +53,7 @@ struct table_action_profile_params {
struct rte_table_action_nat_config nat;
struct rte_table_action_ttl_config ttl;
struct rte_table_action_stats_config stats;
+ struct rte_table_action_sym_crypto_config sym_crypto;
};
struct table_action_profile {
diff --git a/examples/ip_pipeline/cli.c b/examples/ip_pipeline/cli.c
index 102a1d6b..d1e55407 100644
--- a/examples/ip_pipeline/cli.c
+++ b/examples/ip_pipeline/cli.c
@@ -12,6 +12,8 @@
#include <rte_ethdev.h>
#include "cli.h"
+
+#include "cryptodev.h"
#include "kni.h"
#include "link.h"
#include "mempool.h"
@@ -785,6 +787,65 @@ cmd_kni(char **tokens,
}
}
+static const char cmd_cryptodev_help[] =
+"cryptodev <cryptodev_name>\n"
+" dev <device_name> | dev_id <device_id>\n"
+" queue <n_queues> <queue_size>\n";
+
+static void
+cmd_cryptodev(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct cryptodev_params params;
+ char *name;
+
+ memset(&params, 0, sizeof(params));
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0)
+ params.dev_name = tokens[3];
+ else if (strcmp(tokens[2], "dev_id") == 0) {
+ if (parser_read_uint32(&params.dev_id, tokens[3]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "dev_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "cryptodev");
+ return;
+ }
+
+ if (strcmp(tokens[4], "queue")) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "4");
+ return;
+ }
+
+ if (parser_read_uint32(&params.n_queues, tokens[5]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "q");
+ return;
+ }
+
+ if (parser_read_uint32(&params.queue_size, tokens[6]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_size");
+ return;
+ }
+
+ if (cryptodev_create(name, &params) == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
static const char cmd_port_in_action_profile_help[] =
"port in action profile <profile_name>\n"
@@ -961,13 +1022,19 @@ static const char cmd_table_action_profile_help[] =
" tc <n_tc>\n"
" stats none | pkts | bytes | both]\n"
" [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]\n"
-" [encap ether | vlan | qinq | mpls | pppoe]\n"
+" [encap ether | vlan | qinq | mpls | pppoe |\n"
+" vxlan offset <ether_offset> ipv4 | ipv6 vlan on | off]\n"
" [nat src | dst\n"
" proto udp | tcp]\n"
" [ttl drop | fwd\n"
" stats none | pkts]\n"
" [stats pkts | bytes | both]\n"
-" [time]\n";
+" [time]\n"
+" [sym_crypto dev <CRYPTODEV_NAME> offset <op_offset> "
+" mempool_create <mempool_name>\n"
+" mempool_init <mempool_name>]\n"
+" [tag]\n"
+" [decap]\n";
static void
cmd_table_action_profile(char **tokens,
@@ -1157,6 +1224,8 @@ cmd_table_action_profile(char **tokens,
} /* tm */
if ((t0 < n_tokens) && (strcmp(tokens[t0], "encap") == 0)) {
+ uint32_t n_extra_tokens = 0;
+
if (n_tokens < t0 + 2) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
"action profile encap");
@@ -1173,13 +1242,61 @@ cmd_table_action_profile(char **tokens,
p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS;
else if (strcmp(tokens[t0 + 1], "pppoe") == 0)
p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE;
- else {
+ else if (strcmp(tokens[t0 + 1], "vxlan") == 0) {
+ if (n_tokens < t0 + 2 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "action profile encap vxlan");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "vxlan: offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.encap.vxlan.data_offset,
+ tokens[t0 + 2 + 1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: ether_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 2], "ipv4") == 0)
+ p.encap.vxlan.ip_version = 1;
+ else if (strcmp(tokens[t0 + 2 + 2], "ipv6") == 0)
+ p.encap.vxlan.ip_version = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 3], "vlan") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "vxlan: vlan");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 4], "on") == 0)
+ p.encap.vxlan.vlan = 1;
+ else if (strcmp(tokens[t0 + 2 + 4], "off") == 0)
+ p.encap.vxlan.vlan = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: on or off");
+ return;
+ }
+
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN;
+ n_extra_tokens = 5;
+ } else {
snprintf(out, out_size, MSG_ARG_MISMATCH, "encap");
return;
}
p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP;
- t0 += 2;
+ t0 += 2 + n_extra_tokens;
} /* encap */
if ((t0 < n_tokens) && (strcmp(tokens[t0], "nat") == 0)) {
@@ -1285,6 +1402,67 @@ cmd_table_action_profile(char **tokens,
t0 += 1;
} /* time */
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "sym_crypto") == 0)) {
+ struct cryptodev *cryptodev;
+ struct mempool *mempool;
+
+ if (n_tokens < t0 + 9 ||
+ strcmp(tokens[t0 + 1], "dev") ||
+ strcmp(tokens[t0 + 3], "offset") ||
+ strcmp(tokens[t0 + 5], "mempool_create") ||
+ strcmp(tokens[t0 + 7], "mempool_init")) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile sym_crypto");
+ return;
+ }
+
+ cryptodev = cryptodev_find(tokens[t0 + 2]);
+ if (cryptodev == NULL) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "table action profile sym_crypto");
+ return;
+ }
+
+ p.sym_crypto.cryptodev_id = cryptodev->dev_id;
+
+ if (parser_read_uint32(&p.sym_crypto.op_offset,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "table action profile sym_crypto");
+ return;
+ }
+
+ mempool = mempool_find(tokens[t0 + 6]);
+ if (mempool == NULL) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "table action profile sym_crypto");
+ return;
+ }
+ p.sym_crypto.mp_create = mempool->m;
+
+ mempool = mempool_find(tokens[t0 + 8]);
+ if (mempool == NULL) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "table action profile sym_crypto");
+ return;
+ }
+ p.sym_crypto.mp_init = mempool->m;
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_SYM_CRYPTO;
+
+ t0 += 9;
+ } /* sym_crypto */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "tag") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TAG;
+ t0 += 1;
+ } /* tag */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "decap") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_DECAP;
+ t0 += 1;
+ } /* decap */
+
if (t0 < n_tokens) {
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
return;
@@ -1366,6 +1544,7 @@ static const char cmd_pipeline_port_in_help[] =
" | tap <tap_name> mempool <mempool_name> mtu <mtu>\n"
" | kni <kni_name>\n"
" | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>\n"
+" | cryptodev <cryptodev_name> rxq <queue_id>\n"
" [action <port_in_action_profile_name>]\n"
" [disabled]\n";
@@ -1538,6 +1717,26 @@ cmd_pipeline_port_in(char **tokens,
}
t0 += 7;
+ } else if (strcmp(tokens[t0], "cryptodev") == 0) {
+ if (n_tokens < t0 + 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in cryptodev");
+ return;
+ }
+
+ p.type = PORT_IN_CRYPTODEV;
+
+ p.dev_name = tokens[t0 + 1];
+ if (parser_read_uint16(&p.rxq.queue_id, tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "rxq");
+ return;
+ }
+
+ p.cryptodev.arg_callback = NULL;
+ p.cryptodev.f_callback = NULL;
+
+ t0 += 4;
} else {
snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
return;
@@ -1584,7 +1783,8 @@ static const char cmd_pipeline_port_out_help[] =
" | tmgr <tmgr_name>\n"
" | tap <tap_name>\n"
" | kni <kni_name>\n"
-" | sink [file <file_name> pkts <max_n_pkts>]\n";
+" | sink [file <file_name> pkts <max_n_pkts>]\n"
+" | cryptodev <cryptodev_name> txq <txq_id> offset <crypto_op_offset>\n";
static void
cmd_pipeline_port_out(char **tokens,
@@ -1718,6 +1918,41 @@ cmd_pipeline_port_out(char **tokens,
return;
}
}
+
+ } else if (strcmp(tokens[6], "cryptodev") == 0) {
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ p.type = PORT_OUT_CRYPTODEV;
+
+ p.dev_name = tokens[7];
+
+ if (strcmp(tokens[8], "txq")) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ if (parser_read_uint16(&p.cryptodev.queue_id, tokens[9])
+ != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
+
+ if (strcmp(tokens[10], "offset")) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ if (parser_read_uint32(&p.cryptodev.op_offset, tokens[11])
+ != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
} else {
snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
return;
@@ -2861,11 +3096,31 @@ parse_match(char **tokens,
* [label1 <label> <tc> <ttl>
* [label2 <label> <tc> <ttl>
* [label3 <label> <tc> <ttl>]]]
- * | pppoe <da> <sa> <session_id>]
+ * | pppoe <da> <sa> <session_id>
+ * | vxlan ether <da> <sa>
+ * [vlan <pcp> <dei> <vid>]
+ * ipv4 <sa> <da> <dscp> <ttl>
+ * | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit>
+ * udp <sp> <dp>
+ * vxlan <vni>]
* [nat ipv4 | ipv6 <addr> <port>]
* [ttl dec | keep]
* [stats]
* [time]
+ * [sym_crypto
+ * encrypt | decrypt
+ * type
+ * | cipher
+ * cipher_algo <algo> cipher_key <key> cipher_iv <iv>
+ * | cipher_auth
+ * cipher_algo <algo> cipher_key <key> cipher_iv <iv>
+ * auth_algo <algo> auth_key <key> digest_size <size>
+ * | aead
+ * aead_algo <algo> aead_key <key> aead_iv <iv> aead_aad <aad>
+ * digest_size <size>
+ * data_offset <data_offset>]
+ * [tag <tag>]
+ * [decap <n>]
*
* where:
* <pa> ::= g | y | r | drop
@@ -3254,6 +3509,122 @@ parse_table_action_encap(char **tokens,
return 1 + 4;
}
+ /* vxlan */
+ if (n_tokens && (strcmp(tokens[0], "vxlan") == 0)) {
+ uint32_t n = 0;
+
+ n_tokens--;
+ tokens++;
+ n++;
+
+ /* ether <da> <sa> */
+ if ((n_tokens < 3) ||
+ strcmp(tokens[0], "ether") ||
+ parse_mac_addr(tokens[1], &a->encap.vxlan.ether.da) ||
+ parse_mac_addr(tokens[2], &a->encap.vxlan.ether.sa))
+ return 0;
+
+ n_tokens -= 3;
+ tokens += 3;
+ n += 3;
+
+ /* [vlan <pcp> <dei> <vid>] */
+ if (strcmp(tokens[0], "vlan") == 0) {
+ uint32_t pcp, dei, vid;
+
+ if ((n_tokens < 4) ||
+ parser_read_uint32(&pcp, tokens[1]) ||
+ (pcp > 7) ||
+ parser_read_uint32(&dei, tokens[2]) ||
+ (dei > 1) ||
+ parser_read_uint32(&vid, tokens[3]) ||
+ (vid > 0xFFF))
+ return 0;
+
+ a->encap.vxlan.vlan.pcp = pcp;
+ a->encap.vxlan.vlan.dei = dei;
+ a->encap.vxlan.vlan.vid = vid;
+
+ n_tokens -= 4;
+ tokens += 4;
+ n += 4;
+ }
+
+ /* ipv4 <sa> <da> <dscp> <ttl>
+ | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit> */
+ if (strcmp(tokens[0], "ipv4") == 0) {
+ struct in_addr sa, da;
+ uint8_t dscp, ttl;
+
+ if ((n_tokens < 5) ||
+ parse_ipv4_addr(tokens[1], &sa) ||
+ parse_ipv4_addr(tokens[2], &da) ||
+ parser_read_uint8(&dscp, tokens[3]) ||
+ (dscp > 64) ||
+ parser_read_uint8(&ttl, tokens[4]))
+ return 0;
+
+ a->encap.vxlan.ipv4.sa = rte_be_to_cpu_32(sa.s_addr);
+ a->encap.vxlan.ipv4.da = rte_be_to_cpu_32(da.s_addr);
+ a->encap.vxlan.ipv4.dscp = dscp;
+ a->encap.vxlan.ipv4.ttl = ttl;
+
+ n_tokens -= 5;
+ tokens += 5;
+ n += 5;
+ } else if (strcmp(tokens[0], "ipv6") == 0) {
+ struct in6_addr sa, da;
+ uint32_t flow_label;
+ uint8_t dscp, hop_limit;
+
+ if ((n_tokens < 6) ||
+ parse_ipv6_addr(tokens[1], &sa) ||
+ parse_ipv6_addr(tokens[2], &da) ||
+ parser_read_uint32(&flow_label, tokens[3]) ||
+ parser_read_uint8(&dscp, tokens[4]) ||
+ (dscp > 64) ||
+ parser_read_uint8(&hop_limit, tokens[5]))
+ return 0;
+
+ memcpy(a->encap.vxlan.ipv6.sa, sa.s6_addr, 16);
+ memcpy(a->encap.vxlan.ipv6.da, da.s6_addr, 16);
+ a->encap.vxlan.ipv6.flow_label = flow_label;
+ a->encap.vxlan.ipv6.dscp = dscp;
+ a->encap.vxlan.ipv6.hop_limit = hop_limit;
+
+ n_tokens -= 6;
+ tokens += 6;
+ n += 6;
+ } else
+ return 0;
+
+ /* udp <sp> <dp> */
+ if ((n_tokens < 3) ||
+ strcmp(tokens[0], "udp") ||
+ parser_read_uint16(&a->encap.vxlan.udp.sp, tokens[1]) ||
+ parser_read_uint16(&a->encap.vxlan.udp.dp, tokens[2]))
+ return 0;
+
+ n_tokens -= 3;
+ tokens += 3;
+ n += 3;
+
+ /* vxlan <vni> */
+ if ((n_tokens < 2) ||
+ strcmp(tokens[0], "vxlan") ||
+ parser_read_uint32(&a->encap.vxlan.vxlan.vni, tokens[1]) ||
+ (a->encap.vxlan.vxlan.vni > 0xFFFFFF))
+ return 0;
+
+ n_tokens -= 2;
+ tokens += 2;
+ n += 2;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + n;
+ }
+
return 0;
}
@@ -3348,6 +3719,400 @@ parse_table_action_time(char **tokens,
return 1;
}
+static void
+parse_free_sym_crypto_param_data(struct rte_table_action_sym_crypto_params *p)
+{
+ struct rte_crypto_sym_xform *xform[2] = {NULL};
+ uint32_t i;
+
+ xform[0] = p->xform;
+ if (xform[0])
+ xform[1] = xform[0]->next;
+
+ for (i = 0; i < 2; i++) {
+ if (xform[i] == NULL)
+ continue;
+
+ switch (xform[i]->type) {
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ if (xform[i]->cipher.key.data)
+ free(xform[i]->cipher.key.data);
+ if (p->cipher_auth.cipher_iv.val)
+ free(p->cipher_auth.cipher_iv.val);
+ if (p->cipher_auth.cipher_iv_update.val)
+ free(p->cipher_auth.cipher_iv_update.val);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (xform[i]->auth.key.data)
+ free(xform[i]->cipher.key.data);
+ if (p->cipher_auth.auth_iv.val)
+ free(p->cipher_auth.cipher_iv.val);
+ if (p->cipher_auth.auth_iv_update.val)
+ free(p->cipher_auth.cipher_iv_update.val);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ if (xform[i]->aead.key.data)
+ free(xform[i]->cipher.key.data);
+ if (p->aead.iv.val)
+ free(p->aead.iv.val);
+ if (p->aead.aad.val)
+ free(p->aead.aad.val);
+ break;
+ default:
+ continue;
+ }
+ }
+
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_cipher;
+ int status;
+ size_t len;
+
+ if (n_tokens < 7 || strcmp(tokens[1], "cipher_algo") ||
+ strcmp(tokens[3], "cipher_key") ||
+ strcmp(tokens[5], "cipher_iv"))
+ return NULL;
+
+ xform_cipher = calloc(1, sizeof(*xform_cipher));
+ if (xform_cipher == NULL)
+ return NULL;
+
+ xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher->cipher.op = encrypt ? RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ /* cipher_algo */
+ status = rte_cryptodev_get_cipher_algo_enum(
+ &xform_cipher->cipher.algo, tokens[2]);
+ if (status < 0)
+ goto error_exit;
+
+ /* cipher_key */
+ len = strlen(tokens[4]);
+ xform_cipher->cipher.key.data = calloc(1, len / 2 + 1);
+ if (xform_cipher->cipher.key.data == NULL)
+ goto error_exit;
+
+ status = parse_hex_string(tokens[4],
+ xform_cipher->cipher.key.data,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher->cipher.key.length = (uint16_t)len;
+
+ /* cipher_iv */
+ len = strlen(tokens[6]);
+
+ p->cipher_auth.cipher_iv.val = calloc(1, len / 2 + 1);
+ if (p->cipher_auth.cipher_iv.val == NULL)
+ goto error_exit;
+
+ status = parse_hex_string(tokens[6],
+ p->cipher_auth.cipher_iv.val,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher->cipher.iv.length = (uint16_t)len;
+ xform_cipher->cipher.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET;
+ p->cipher_auth.cipher_iv.length = (uint32_t)len;
+ *used_n_tokens = 7;
+
+ return xform_cipher;
+
+error_exit:
+ if (xform_cipher->cipher.key.data)
+ free(xform_cipher->cipher.key.data);
+
+ if (p->cipher_auth.cipher_iv.val) {
+ free(p->cipher_auth.cipher_iv.val);
+ p->cipher_auth.cipher_iv.val = NULL;
+ }
+
+ free(xform_cipher);
+
+ return NULL;
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher_auth(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_cipher;
+ struct rte_crypto_sym_xform *xform_auth;
+ int status;
+ size_t len;
+
+ if (n_tokens < 13 ||
+ strcmp(tokens[7], "auth_algo") ||
+ strcmp(tokens[9], "auth_key") ||
+ strcmp(tokens[11], "digest_size"))
+ return NULL;
+
+ xform_auth = calloc(1, sizeof(*xform_auth));
+ if (xform_auth == NULL)
+ return NULL;
+
+ xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ xform_auth->auth.op = encrypt ? RTE_CRYPTO_AUTH_OP_GENERATE :
+ RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ /* auth_algo */
+ status = rte_cryptodev_get_auth_algo_enum(&xform_auth->auth.algo,
+ tokens[8]);
+ if (status < 0)
+ goto error_exit;
+
+ /* auth_key */
+ len = strlen(tokens[10]);
+ xform_auth->auth.key.data = calloc(1, len / 2 + 1);
+ if (xform_auth->auth.key.data == NULL)
+ goto error_exit;
+
+ status = parse_hex_string(tokens[10],
+ xform_auth->auth.key.data, (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_auth->auth.key.length = (uint16_t)len;
+
+ if (strcmp(tokens[11], "digest_size"))
+ goto error_exit;
+
+ status = parser_read_uint16(&xform_auth->auth.digest_length,
+ tokens[12]);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher = parse_table_action_cipher(p, tokens, 7, encrypt,
+ used_n_tokens);
+ if (xform_cipher == NULL)
+ goto error_exit;
+
+ *used_n_tokens += 6;
+
+ if (encrypt) {
+ xform_cipher->next = xform_auth;
+ return xform_cipher;
+ } else {
+ xform_auth->next = xform_cipher;
+ return xform_auth;
+ }
+
+error_exit:
+ if (xform_auth->auth.key.data)
+ free(xform_auth->auth.key.data);
+ if (p->cipher_auth.auth_iv.val) {
+ free(p->cipher_auth.auth_iv.val);
+ p->cipher_auth.auth_iv.val = NULL;
+ }
+
+ free(xform_auth);
+
+ return NULL;
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_aead(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_aead;
+ int status;
+ size_t len;
+
+ if (n_tokens < 11 || strcmp(tokens[1], "aead_algo") ||
+ strcmp(tokens[3], "aead_key") ||
+ strcmp(tokens[5], "aead_iv") ||
+ strcmp(tokens[7], "aead_aad") ||
+ strcmp(tokens[9], "digest_size"))
+ return NULL;
+
+ xform_aead = calloc(1, sizeof(*xform_aead));
+ if (xform_aead == NULL)
+ return NULL;
+
+ xform_aead->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ xform_aead->aead.op = encrypt ? RTE_CRYPTO_AEAD_OP_ENCRYPT :
+ RTE_CRYPTO_AEAD_OP_DECRYPT;
+
+ /* aead_algo */
+ status = rte_cryptodev_get_aead_algo_enum(&xform_aead->aead.algo,
+ tokens[2]);
+ if (status < 0)
+ goto error_exit;
+
+ /* aead_key */
+ len = strlen(tokens[4]);
+ xform_aead->aead.key.data = calloc(1, len / 2 + 1);
+ if (xform_aead->aead.key.data == NULL)
+ goto error_exit;
+
+ status = parse_hex_string(tokens[4], xform_aead->aead.key.data,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.key.length = (uint16_t)len;
+
+ /* aead_iv */
+ len = strlen(tokens[6]);
+ p->aead.iv.val = calloc(1, len / 2 + 1);
+ if (p->aead.iv.val == NULL)
+ goto error_exit;
+
+ status = parse_hex_string(tokens[6], p->aead.iv.val,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.iv.length = (uint16_t)len;
+ xform_aead->aead.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET;
+ p->aead.iv.length = (uint32_t)len;
+
+ /* aead_aad */
+ len = strlen(tokens[8]);
+ p->aead.aad.val = calloc(1, len / 2 + 1);
+ if (p->aead.aad.val == NULL)
+ goto error_exit;
+
+ status = parse_hex_string(tokens[8], p->aead.aad.val, (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.aad_length = (uint16_t)len;
+ p->aead.aad.length = (uint32_t)len;
+
+ /* digest_size */
+ status = parser_read_uint16(&xform_aead->aead.digest_length,
+ tokens[10]);
+ if (status < 0)
+ goto error_exit;
+
+ *used_n_tokens = 11;
+
+ return xform_aead;
+
+error_exit:
+ if (xform_aead->aead.key.data)
+ free(xform_aead->aead.key.data);
+ if (p->aead.iv.val) {
+ free(p->aead.iv.val);
+ p->aead.iv.val = NULL;
+ }
+ if (p->aead.aad.val) {
+ free(p->aead.aad.val);
+ p->aead.aad.val = NULL;
+ }
+
+ free(xform_aead);
+
+ return NULL;
+}
+
+
+static uint32_t
+parse_table_action_sym_crypto(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ struct rte_table_action_sym_crypto_params *p = &a->sym_crypto;
+ struct rte_crypto_sym_xform *xform = NULL;
+ uint32_t used_n_tokens;
+ uint32_t encrypt;
+ int status;
+
+ if ((n_tokens < 12) ||
+ strcmp(tokens[0], "sym_crypto") ||
+ strcmp(tokens[2], "type"))
+ return 0;
+
+ memset(p, 0, sizeof(*p));
+
+ if (strcmp(tokens[1], "encrypt") == 0)
+ encrypt = 1;
+ else
+ encrypt = 0;
+
+ status = parser_read_uint32(&p->data_offset, tokens[n_tokens - 1]);
+ if (status < 0)
+ return 0;
+
+ if (strcmp(tokens[3], "cipher") == 0) {
+ tokens += 3;
+ n_tokens -= 3;
+
+ xform = parse_table_action_cipher(p, tokens, n_tokens, encrypt,
+ &used_n_tokens);
+ } else if (strcmp(tokens[3], "cipher_auth") == 0) {
+ tokens += 3;
+ n_tokens -= 3;
+
+ xform = parse_table_action_cipher_auth(p, tokens, n_tokens,
+ encrypt, &used_n_tokens);
+ } else if (strcmp(tokens[3], "aead") == 0) {
+ tokens += 3;
+ n_tokens -= 3;
+
+ xform = parse_table_action_aead(p, tokens, n_tokens, encrypt,
+ &used_n_tokens);
+ }
+
+ if (xform == NULL)
+ return 0;
+
+ p->xform = xform;
+
+ if (strcmp(tokens[used_n_tokens], "data_offset")) {
+ parse_free_sym_crypto_param_data(p);
+ return 0;
+ }
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_SYM_CRYPTO;
+
+ return used_n_tokens + 5;
+}
+
+static uint32_t
+parse_table_action_tag(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens < 2) ||
+ strcmp(tokens[0], "tag"))
+ return 0;
+
+ if (parser_read_uint32(&a->tag.tag, tokens[1]))
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
+ return 2;
+}
+
+static uint32_t
+parse_table_action_decap(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens < 2) ||
+ strcmp(tokens[0], "decap"))
+ return 0;
+
+ if (parser_read_uint16(&a->decap.n, tokens[1]))
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
+ return 2;
+}
+
static uint32_t
parse_table_action(char **tokens,
uint32_t n_tokens,
@@ -3492,6 +4257,47 @@ parse_table_action(char **tokens,
n_tokens -= n;
}
+ if (n_tokens && (strcmp(tokens[0], "sym_crypto") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_sym_crypto(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action sym_crypto");
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "tag") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_tag(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action tag");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "decap") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_decap(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action decap");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
if (n_tokens0 - n_tokens == 1) {
snprintf(out, out_size, MSG_ARG_INVALID, "action");
return 0;
@@ -3579,6 +4385,9 @@ cmd_pipeline_table_rule_add(char **tokens,
snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
return;
}
+
+ if (a.action_mask & 1 << RTE_TABLE_ACTION_SYM_CRYPTO)
+ parse_free_sym_crypto_param_data(&a.sym_crypto);
}
@@ -4570,6 +5379,11 @@ cmd_help(char **tokens, uint32_t n_tokens, char *out, size_t out_size)
return;
}
+ if (strcmp(tokens[0], "cryptodev") == 0) {
+ snprintf(out, out_size, "\n%s\n", cmd_cryptodev_help);
+ return;
+ }
+
if ((n_tokens == 4) &&
(strcmp(tokens[0], "port") == 0) &&
(strcmp(tokens[1], "in") == 0) &&
@@ -4860,6 +5674,11 @@ cli_process(char *in, char *out, size_t out_size)
return;
}
+ if (strcmp(tokens[0], "cryptodev") == 0) {
+ cmd_cryptodev(tokens, n_tokens, out, out_size);
+ return;
+ }
+
if (strcmp(tokens[0], "port") == 0) {
cmd_port_in_action_profile(tokens, n_tokens, out, out_size);
return;
diff --git a/examples/ip_pipeline/conn.c b/examples/ip_pipeline/conn.c
index 6b08e9e8..30fca80c 100644
--- a/examples/ip_pipeline/conn.c
+++ b/examples/ip_pipeline/conn.c
@@ -8,7 +8,6 @@
#include <unistd.h>
#include <sys/types.h>
-#define __USE_GNU
#include <sys/socket.h>
#include <sys/epoll.h>
diff --git a/examples/ip_pipeline/cryptodev.c b/examples/ip_pipeline/cryptodev.c
new file mode 100644
index 00000000..c4ba72be
--- /dev/null
+++ b/examples/ip_pipeline/cryptodev.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_string_fns.h>
+
+#include "cryptodev.h"
+
+static struct cryptodev_list cryptodev_list;
+
+int
+cryptodev_init(void)
+{
+ TAILQ_INIT(&cryptodev_list);
+
+ return 0;
+}
+
+struct cryptodev *
+cryptodev_find(const char *name)
+{
+ struct cryptodev *cryptodev;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(cryptodev, &cryptodev_list, node)
+ if (strcmp(cryptodev->name, name) == 0)
+ return cryptodev;
+
+ return NULL;
+}
+
+struct cryptodev *
+cryptodev_next(struct cryptodev *cryptodev)
+{
+ return (cryptodev == NULL) ?
+ TAILQ_FIRST(&cryptodev_list) :
+ TAILQ_NEXT(cryptodev, node);
+}
+
+struct cryptodev *
+cryptodev_create(const char *name, struct cryptodev_params *params)
+{
+ struct rte_cryptodev_info dev_info;
+ struct rte_cryptodev_config dev_conf;
+ struct rte_cryptodev_qp_conf queue_conf;
+ struct cryptodev *cryptodev;
+ uint32_t dev_id, i;
+ uint32_t socket_id;
+ int status;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ cryptodev_find(name) ||
+ (params->n_queues == 0) ||
+ (params->queue_size == 0))
+ return NULL;
+
+ if (params->dev_name) {
+ status = rte_cryptodev_get_dev_id(params->dev_name);
+ if (status == -1)
+ return NULL;
+
+ dev_id = (uint32_t)status;
+ } else {
+ if (rte_cryptodev_pmd_is_valid_dev(params->dev_id) == 0)
+ return NULL;
+
+ dev_id = params->dev_id;
+ }
+
+ socket_id = rte_cryptodev_socket_id(dev_id);
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
+ if (dev_info.max_nb_queue_pairs < params->n_queues)
+ return NULL;
+ if (dev_info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED)
+ return NULL;
+
+ dev_conf.socket_id = socket_id;
+ dev_conf.nb_queue_pairs = params->n_queues;
+
+ status = rte_cryptodev_configure(dev_id, &dev_conf);
+ if (status < 0)
+ return NULL;
+
+ queue_conf.nb_descriptors = params->queue_size;
+ for (i = 0; i < params->n_queues; i++) {
+ status = rte_cryptodev_queue_pair_setup(dev_id, i,
+ &queue_conf, socket_id, NULL);
+ if (status < 0)
+ return NULL;
+ }
+
+ if (rte_cryptodev_start(dev_id) < 0)
+ return NULL;
+
+ cryptodev = calloc(1, sizeof(struct cryptodev));
+ if (cryptodev == NULL) {
+ rte_cryptodev_stop(dev_id);
+ return NULL;
+ }
+
+ strlcpy(cryptodev->name, name, sizeof(cryptodev->name));
+ cryptodev->dev_id = dev_id;
+ cryptodev->n_queues = params->n_queues;
+
+ TAILQ_INSERT_TAIL(&cryptodev_list, cryptodev, node);
+
+ return cryptodev;
+}
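
Illustration only, not part of the patch: a minimal sketch of how the new helper is meant to be driven (the CLI "cryptodev" command does the equivalent of this). It assumes rte_eal_init() has already run and that a crypto vdev named crypto_aesni_gcm0 was created on the EAL command line; the names are taken from the flow_crypto.cli example below.

#include "cryptodev.h"

static int
setup_crypto_example(void)
{
	struct cryptodev_params params = {
		.dev_name = "crypto_aesni_gcm0", /* looked up via rte_cryptodev_get_dev_id() */
		.n_queues = 1,
		.queue_size = 1024,
	};

	if (cryptodev_init() != 0)
		return -1;

	/* Configures, sets up the queue pairs and starts the device */
	if (cryptodev_create("CRYPTO0", &params) == NULL)
		return -1;

	return 0;
}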
diff --git a/examples/ip_pipeline/cryptodev.h b/examples/ip_pipeline/cryptodev.h
new file mode 100644
index 00000000..d06b3f2f
--- /dev/null
+++ b/examples/ip_pipeline/cryptodev.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_SYM_C_H_
+#define _INCLUDE_SYM_C_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_cryptodev.h>
+
+#include "common.h"
+
+struct cryptodev {
+ TAILQ_ENTRY(cryptodev) node;
+ char name[NAME_SIZE];
+ uint16_t dev_id;
+ uint32_t n_queues;
+};
+
+TAILQ_HEAD(cryptodev_list, cryptodev);
+
+int
+cryptodev_init(void);
+
+struct cryptodev *
+cryptodev_find(const char *name);
+
+struct cryptodev *
+cryptodev_next(struct cryptodev *cryptodev);
+
+struct cryptodev_params {
+ const char *dev_name;
+ uint32_t dev_id; /**< Valid only when *dev_name* is NULL. */
+ uint32_t n_queues;
+ uint32_t queue_size;
+};
+
+struct cryptodev *
+cryptodev_create(const char *name, struct cryptodev_params *params);
+
+#endif
diff --git a/examples/ip_pipeline/examples/flow_crypto.cli b/examples/ip_pipeline/examples/flow_crypto.cli
new file mode 100644
index 00000000..9b639deb
--- /dev/null
+++ b/examples/ip_pipeline/examples/flow_crypto.cli
@@ -0,0 +1,58 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2018 Intel Corporation
+
+; ________________
+; LINK0 RXQ0 --->| |---> CRYPTO0 TXQ0
+; | Flow |
+; CRYPTO0 RXQ0-->| Classification |---> LINK0 TXQ0
+; |________________|
+; |
+; +-----------> SINK0 (flow lookup miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+; 4 Packet 256 1536
+; 5 Crypto Operation 1792 160
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 1
+mempool MEMPOOL_SESSION0 buffer 1024 pool 1024 cache 128 cpu 1
+
+link LINK0 dev 0000:81:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+#Cryptodev
+cryptodev CRYPTO0 dev crypto_aesni_gcm0 queue 1 1024
+
+table action profile AP0 ipv4 offset 270 fwd sym_crypto dev CRYPTO0 offset 1792 mempool_create MEMPOOL_SESSION0 mempool_init MEMPOOL_SESSION0
+table action profile AP1 ipv4 offset 270 fwd
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 1
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 cryptodev CRYPTO0 rxq 0
+
+pipeline PIPELINE0 port out bsz 32 cryptodev CRYPTO0 txq 0 offset 1792
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+pipeline PIPELINE0 port out bsz 32 sink
+
+pipeline PIPELINE0 table match hash ext key 8 mask FFFFFFFF00000000 offset 282 buckets 1K size 4K action AP0
+pipeline PIPELINE0 table match stub action AP1
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 1
+
+thread 24 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd port 2
+
+#AES-GCM encrypt
+pipeline PIPELINE0 table 0 rule add match hash ipv4_addr 100.0.0.10 action fwd port 0 sym_crypto encrypt type aead aead_algo aes-gcm aead_key 000102030405060708090a0b0c0d0e0f aead_iv 000102030405060708090a0b aead_aad 000102030405060708090a0b0c0d0e0f digest_size 8 data_offset 290
+#AES-GCM decrypt
+#pipeline PIPELINE0 table 0 rule add match hash ipv4_addr 100.0.0.10 action fwd port 0 sym_crypto decrypt type aead aead_algo aes-gcm aead_key 000102030405060708090a0b0c0d0e0f aead_iv 000102030405060708090a0b aead_aad 000102030405060708090a0b0c0d0e0f digest_size 8 data_offset 290
+
+pipeline PIPELINE0 table 1 rule add match default action fwd port 1
diff --git a/examples/ip_pipeline/hash_func.h b/examples/ip_pipeline/hash_func.h
deleted file mode 100644
index f1b9d944..00000000
--- a/examples/ip_pipeline/hash_func.h
+++ /dev/null
@@ -1,357 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2018 Intel Corporation
- */
-
-#ifndef __INCLUDE_HASH_FUNC_H__
-#define __INCLUDE_HASH_FUNC_H__
-
-static inline uint64_t
-hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = seed ^ (k[0] & m[0]);
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
-
- xor0 ^= k[2] & m[2];
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
-
- xor0 ^= xor1;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
-
- xor0 ^= xor1;
-
- xor0 ^= k[4] & m[4];
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
-
- xor0 ^= xor1;
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
-
- xor0 ^= xor1;
- xor2 ^= k[6] & m[6];
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2, xor3;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
- xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
-
- xor0 ^= xor1;
- xor2 ^= xor3;
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-#if defined(RTE_ARCH_X86_64)
-
-#include <x86intrin.h>
-
-static inline uint64_t
-hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t crc0;
-
- crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, crc0, crc1;
-
- k0 = k[0] & m[0];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc0 = _mm_crc32_u64(crc0, k2);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = k2 >> 32;
-
- crc0 = _mm_crc32_u64(crc0, crc1);
- crc1 = _mm_crc32_u64(crc2, crc3);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc0 = _mm_crc32_u64(crc0, crc1);
- crc1 = _mm_crc32_u64(crc2, crc3);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, k5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
- crc5 = k5 >> 32;
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
- crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-#define hash_default_key8 hash_crc_key8
-#define hash_default_key16 hash_crc_key16
-#define hash_default_key24 hash_crc_key24
-#define hash_default_key32 hash_crc_key32
-#define hash_default_key40 hash_crc_key40
-#define hash_default_key48 hash_crc_key48
-#define hash_default_key56 hash_crc_key56
-#define hash_default_key64 hash_crc_key64
-
-#elif defined(RTE_ARCH_ARM64)
-#include "hash_func_arm64.h"
-#else
-
-#define hash_default_key8 hash_xor_key8
-#define hash_default_key16 hash_xor_key16
-#define hash_default_key24 hash_xor_key24
-#define hash_default_key32 hash_xor_key32
-#define hash_default_key40 hash_xor_key40
-#define hash_default_key48 hash_xor_key48
-#define hash_default_key56 hash_xor_key56
-#define hash_default_key64 hash_xor_key64
-
-#endif
-
-#endif
diff --git a/examples/ip_pipeline/hash_func_arm64.h b/examples/ip_pipeline/hash_func_arm64.h
deleted file mode 100644
index 50df8167..00000000
--- a/examples/ip_pipeline/hash_func_arm64.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2018 Linaro Limited.
- */
-#ifndef __HASH_FUNC_ARM64_H__
-#define __HASH_FUNC_ARM64_H__
-
-#define _CRC32CX(crc, val) \
- __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
-
-static inline uint64_t
-hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint32_t crc0;
-
- crc0 = seed;
- _CRC32CX(crc0, k[0] & m[0]);
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0;
- uint64_t *m = mask;
- uint32_t crc0, crc1;
-
- k0 = k[0] & m[0];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- _CRC32CX(crc0, k2);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
-
- _CRC32CX(crc0, crc1);
- _CRC32CX(crc2, crc3);
-
- crc0 ^= crc2;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- _CRC32CX(crc0, crc1);
- _CRC32CX(crc2, crc3);
-
- crc0 ^= crc2;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, k5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- crc4 = k5;
- _CRC32CX(crc4, k[6] & m[6]);
- crc5 = k5 >> 32;
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- crc4 = k5;
- _CRC32CX(crc4, k[6] & m[6]);
- crc5 = k5 >> 32;
- _CRC32CX(crc5, k[7] & m[7]);
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-#define hash_default_key8 hash_crc_key8
-#define hash_default_key16 hash_crc_key16
-#define hash_default_key24 hash_crc_key24
-#define hash_default_key32 hash_crc_key32
-#define hash_default_key40 hash_crc_key40
-#define hash_default_key48 hash_crc_key48
-#define hash_default_key56 hash_crc_key56
-#define hash_default_key64 hash_crc_key64
-
-#endif
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
index 392a890f..787eb866 100644
--- a/examples/ip_pipeline/link.c
+++ b/examples/ip_pipeline/link.c
@@ -48,7 +48,6 @@ static struct rte_eth_conf port_conf_default = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
.split_hdr_size = 0, /* Header split buffer size */
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/ip_pipeline/main.c b/examples/ip_pipeline/main.c
index a69facee..97d1e91c 100644
--- a/examples/ip_pipeline/main.c
+++ b/examples/ip_pipeline/main.c
@@ -14,6 +14,7 @@
#include "cli.h"
#include "conn.h"
#include "kni.h"
+#include "cryptodev.h"
#include "link.h"
#include "mempool.h"
#include "pipeline.h"
@@ -210,6 +211,14 @@ main(int argc, char **argv)
return status;
}
+ /* Sym Crypto */
+ status = cryptodev_init();
+ if (status) {
+ printf("Error: Cryptodev initialization failed (%d)\n",
+ status);
+ return status;
+ }
+
/* Action */
status = port_in_action_profile_init();
if (status) {
diff --git a/examples/ip_pipeline/meson.build b/examples/ip_pipeline/meson.build
index a9f2ea44..5e5fe647 100644
--- a/examples/ip_pipeline/meson.build
+++ b/examples/ip_pipeline/meson.build
@@ -21,5 +21,6 @@ sources = files(
'swq.c',
'tap.c',
'thread.c',
- 'tmgr.c'
+ 'tmgr.c',
+ 'cryptodev.c'
)
diff --git a/examples/ip_pipeline/pipeline.c b/examples/ip_pipeline/pipeline.c
index 43fe8677..b23d6c09 100644
--- a/examples/ip_pipeline/pipeline.c
+++ b/examples/ip_pipeline/pipeline.c
@@ -18,10 +18,12 @@
#include <rte_port_source_sink.h>
#include <rte_port_fd.h>
#include <rte_port_sched.h>
+#include <rte_port_sym_crypto.h>
#include <rte_table_acl.h>
#include <rte_table_array.h>
#include <rte_table_hash.h>
+#include <rte_table_hash_func.h>
#include <rte_table_lpm.h>
#include <rte_table_lpm_ipv6.h>
#include <rte_table_stub.h>
@@ -35,8 +37,7 @@
#include "tap.h"
#include "tmgr.h"
#include "swq.h"
-
-#include "hash_func.h"
+#include "cryptodev.h"
#ifndef PIPELINE_MSGQ_SIZE
#define PIPELINE_MSGQ_SIZE 64
@@ -163,6 +164,7 @@ pipeline_port_in_create(const char *pipeline_name,
struct rte_port_kni_reader_params kni;
#endif
struct rte_port_source_params source;
+ struct rte_port_sym_crypto_reader_params sym_crypto;
} pp;
struct pipeline *pipeline;
@@ -296,6 +298,27 @@ pipeline_port_in_create(const char *pipeline_name,
break;
}
+ case PORT_IN_CRYPTODEV:
+ {
+ struct cryptodev *cryptodev;
+
+ cryptodev = cryptodev_find(params->dev_name);
+ if (cryptodev == NULL)
+ return -1;
+
+ if (params->cryptodev.queue_id >= cryptodev->n_queues)
+ return -1;
+
+ pp.sym_crypto.cryptodev_id = cryptodev->dev_id;
+ pp.sym_crypto.queue_id = params->cryptodev.queue_id;
+ pp.sym_crypto.f_callback = params->cryptodev.f_callback;
+ pp.sym_crypto.arg_callback = params->cryptodev.arg_callback;
+ p.ops = &rte_port_sym_crypto_reader_ops;
+ p.arg_create = &pp.sym_crypto;
+
+ break;
+ }
+
default:
return -1;
}
@@ -385,6 +408,7 @@ pipeline_port_out_create(const char *pipeline_name,
struct rte_port_kni_writer_params kni;
#endif
struct rte_port_sink_params sink;
+ struct rte_port_sym_crypto_writer_params sym_crypto;
} pp;
union {
@@ -394,6 +418,7 @@ pipeline_port_out_create(const char *pipeline_name,
#ifdef RTE_LIBRTE_KNI
struct rte_port_kni_writer_nodrop_params kni;
#endif
+ struct rte_port_sym_crypto_writer_nodrop_params sym_crypto;
} pp_nodrop;
struct pipeline *pipeline;
@@ -549,6 +574,40 @@ pipeline_port_out_create(const char *pipeline_name,
break;
}
+ case PORT_OUT_CRYPTODEV:
+ {
+ struct cryptodev *cryptodev;
+
+ cryptodev = cryptodev_find(params->dev_name);
+ if (cryptodev == NULL)
+ return -1;
+
+ if (params->cryptodev.queue_id >= cryptodev->n_queues)
+ return -1;
+
+ pp.sym_crypto.cryptodev_id = cryptodev->dev_id;
+ pp.sym_crypto.queue_id = params->cryptodev.queue_id;
+ pp.sym_crypto.tx_burst_sz = params->burst_size;
+ pp.sym_crypto.crypto_op_offset = params->cryptodev.op_offset;
+
+ pp_nodrop.sym_crypto.cryptodev_id = cryptodev->dev_id;
+ pp_nodrop.sym_crypto.queue_id = params->cryptodev.queue_id;
+ pp_nodrop.sym_crypto.tx_burst_sz = params->burst_size;
+ pp_nodrop.sym_crypto.n_retries = params->retry;
+ pp_nodrop.sym_crypto.crypto_op_offset =
+ params->cryptodev.op_offset;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_sym_crypto_writer_ops;
+ p.arg_create = &pp.sym_crypto;
+ } else {
+ p.ops = &rte_port_sym_crypto_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.sym_crypto;
+ }
+
+ break;
+ }
+
default:
return -1;
}
@@ -818,28 +877,28 @@ pipeline_table_create(const char *pipeline_name,
switch (params->match.hash.key_size) {
case 8:
- f_hash = hash_default_key8;
+ f_hash = rte_table_hash_crc_key8;
break;
case 16:
- f_hash = hash_default_key16;
+ f_hash = rte_table_hash_crc_key16;
break;
case 24:
- f_hash = hash_default_key24;
+ f_hash = rte_table_hash_crc_key24;
break;
case 32:
- f_hash = hash_default_key32;
+ f_hash = rte_table_hash_crc_key32;
break;
case 40:
- f_hash = hash_default_key40;
+ f_hash = rte_table_hash_crc_key40;
break;
case 48:
- f_hash = hash_default_key48;
+ f_hash = rte_table_hash_crc_key48;
break;
case 56:
- f_hash = hash_default_key56;
+ f_hash = rte_table_hash_crc_key56;
break;
case 64:
- f_hash = hash_default_key64;
+ f_hash = rte_table_hash_crc_key64;
break;
default:
return -1;
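
Illustration only, not part of the patch: the rte_table_hash_crc_key*() helpers from <rte_table_hash_func.h> have the same prototype as the deleted local hash_default_key*() functions, i.e. (key, mask, key_size, seed), so the table creation parameters are filled exactly as before. A minimal sketch with made-up key/mask values:

#include <stdint.h>
#include <rte_table_hash_func.h>

static uint64_t
example_hash_16byte_key(void)
{
	uint64_t key[2]  = {0x0123456789abcdefULL, 0xfedcba9876543210ULL};
	uint64_t mask[2] = {UINT64_MAX, UINT64_MAX};

	/* key_size (16) is ignored by the fixed-size helpers */
	return rte_table_hash_crc_key16(key, mask, 16, /* seed */ 0);
}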
diff --git a/examples/ip_pipeline/pipeline.h b/examples/ip_pipeline/pipeline.h
index a953a29f..e5b1d5d0 100644
--- a/examples/ip_pipeline/pipeline.h
+++ b/examples/ip_pipeline/pipeline.h
@@ -27,6 +27,7 @@ enum port_in_type {
PORT_IN_TAP,
PORT_IN_KNI,
PORT_IN_SOURCE,
+ PORT_IN_CRYPTODEV,
};
struct port_in_params {
@@ -48,6 +49,12 @@ struct port_in_params {
const char *file_name;
uint32_t n_bytes_per_pkt;
} source;
+
+ struct {
+ uint16_t queue_id;
+ void *f_callback;
+ void *arg_callback;
+ } cryptodev;
};
uint32_t burst_size;
@@ -62,6 +69,7 @@ enum port_out_type {
PORT_OUT_TAP,
PORT_OUT_KNI,
PORT_OUT_SINK,
+ PORT_OUT_CRYPTODEV,
};
struct port_out_params {
@@ -76,6 +84,11 @@ struct port_out_params {
const char *file_name;
uint32_t max_n_pkts;
} sink;
+
+ struct {
+ uint16_t queue_id;
+ uint32_t op_offset;
+ } cryptodev;
};
uint32_t burst_size;
int retry;
@@ -268,6 +281,9 @@ struct table_rule_action {
struct rte_table_action_ttl_params ttl;
struct rte_table_action_stats_params stats;
struct rte_table_action_time_params time;
+ struct rte_table_action_sym_crypto_params sym_crypto;
+ struct rte_table_action_tag_params tag;
+ struct rte_table_action_decap_params decap;
};
int
diff --git a/examples/ip_pipeline/thread.c b/examples/ip_pipeline/thread.c
index 7fc03332..4bd971fd 100644
--- a/examples/ip_pipeline/thread.c
+++ b/examples/ip_pipeline/thread.c
@@ -2244,29 +2244,37 @@ match_convert(struct table_rule_match *mh,
ml->acl_add.field_value[0].mask_range.u8 =
mh->match.acl.proto_mask;
- ml->acl_add.field_value[1].value.u32 = sa32[0];
+ ml->acl_add.field_value[1].value.u32 =
+ rte_be_to_cpu_32(sa32[0]);
ml->acl_add.field_value[1].mask_range.u32 =
sa32_depth[0];
- ml->acl_add.field_value[2].value.u32 = sa32[1];
+ ml->acl_add.field_value[2].value.u32 =
+ rte_be_to_cpu_32(sa32[1]);
ml->acl_add.field_value[2].mask_range.u32 =
sa32_depth[1];
- ml->acl_add.field_value[3].value.u32 = sa32[2];
+ ml->acl_add.field_value[3].value.u32 =
+ rte_be_to_cpu_32(sa32[2]);
ml->acl_add.field_value[3].mask_range.u32 =
sa32_depth[2];
- ml->acl_add.field_value[4].value.u32 = sa32[3];
+ ml->acl_add.field_value[4].value.u32 =
+ rte_be_to_cpu_32(sa32[3]);
ml->acl_add.field_value[4].mask_range.u32 =
sa32_depth[3];
- ml->acl_add.field_value[5].value.u32 = da32[0];
+ ml->acl_add.field_value[5].value.u32 =
+ rte_be_to_cpu_32(da32[0]);
ml->acl_add.field_value[5].mask_range.u32 =
da32_depth[0];
- ml->acl_add.field_value[6].value.u32 = da32[1];
+ ml->acl_add.field_value[6].value.u32 =
+ rte_be_to_cpu_32(da32[1]);
ml->acl_add.field_value[6].mask_range.u32 =
da32_depth[1];
- ml->acl_add.field_value[7].value.u32 = da32[2];
+ ml->acl_add.field_value[7].value.u32 =
+ rte_be_to_cpu_32(da32[2]);
ml->acl_add.field_value[7].mask_range.u32 =
da32_depth[2];
- ml->acl_add.field_value[8].value.u32 = da32[3];
+ ml->acl_add.field_value[8].value.u32 =
+ rte_be_to_cpu_32(da32[3]);
ml->acl_add.field_value[8].mask_range.u32 =
da32_depth[3];
@@ -2308,36 +2316,36 @@ match_convert(struct table_rule_match *mh,
mh->match.acl.proto_mask;
ml->acl_delete.field_value[1].value.u32 =
- sa32[0];
+ rte_be_to_cpu_32(sa32[0]);
ml->acl_delete.field_value[1].mask_range.u32 =
sa32_depth[0];
ml->acl_delete.field_value[2].value.u32 =
- sa32[1];
+ rte_be_to_cpu_32(sa32[1]);
ml->acl_delete.field_value[2].mask_range.u32 =
sa32_depth[1];
ml->acl_delete.field_value[3].value.u32 =
- sa32[2];
+ rte_be_to_cpu_32(sa32[2]);
ml->acl_delete.field_value[3].mask_range.u32 =
sa32_depth[2];
ml->acl_delete.field_value[4].value.u32 =
- sa32[3];
+ rte_be_to_cpu_32(sa32[3]);
ml->acl_delete.field_value[4].mask_range.u32 =
sa32_depth[3];
ml->acl_delete.field_value[5].value.u32 =
- da32[0];
+ rte_be_to_cpu_32(da32[0]);
ml->acl_delete.field_value[5].mask_range.u32 =
da32_depth[0];
ml->acl_delete.field_value[6].value.u32 =
- da32[1];
+ rte_be_to_cpu_32(da32[1]);
ml->acl_delete.field_value[6].mask_range.u32 =
da32_depth[1];
ml->acl_delete.field_value[7].value.u32 =
- da32[2];
+ rte_be_to_cpu_32(da32[2]);
ml->acl_delete.field_value[7].mask_range.u32 =
da32_depth[2];
ml->acl_delete.field_value[8].value.u32 =
- da32[3];
+ rte_be_to_cpu_32(da32[3]);
ml->acl_delete.field_value[8].mask_range.u32 =
da32_depth[3];
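
Illustration only, not part of the patch: the ACL library expects 32-bit field values in host byte order, while the parsed IPv6 address is stored as raw big-endian bytes, which is what the rte_be_to_cpu_32() calls above correct for. A sketch of the conversion for one 32-bit word of an IPv6 address:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

static uint32_t
ipv6_word_to_host(const uint8_t ipv6[16], unsigned int word /* 0..3 */)
{
	uint32_t be;

	memcpy(&be, ipv6 + 4 * word, sizeof(be)); /* network (big-endian) order */
	return rte_be_to_cpu_32(be);              /* host order for ACL fields */
}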
@@ -2476,6 +2484,36 @@ action_convert(struct rte_table_action *a,
return status;
}
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_SYM_CRYPTO,
+ &action->sym_crypto);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TAG,
+ &action->tag);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_DECAP,
+ &action->decap);
+
+ if (status)
+ return status;
+ }
+
return 0;
}
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index b830f67a..17b55d4c 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -165,8 +165,7 @@ static struct rte_eth_conf port_conf = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ DEV_RX_OFFLOAD_JUMBO_FRAME),
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index ee9e590a..e33232c9 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -96,6 +96,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
/* Copy IV at the end of crypto operation */
rte_memcpy(iv_ptr, iv, sa->iv_len);
@@ -326,6 +327,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
memset(iv, 0, sa->iv_len);
break;
@@ -387,6 +389,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
sym_cop->cipher.data.offset = ip_hdr_len +
sizeof(struct esp_hdr);
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index b45b87bd..1bc0b5b5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -54,7 +54,7 @@
#define NB_MBUF (32000)
#define CDEV_QUEUE_DESC 2048
-#define CDEV_MAP_ENTRIES 1024
+#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_NB_OBJS 2048
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1
@@ -197,8 +197,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP,
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
@@ -1392,9 +1391,27 @@ cryptodevs_init(void)
uint32_t max_sess_sz = 0, sess_sz;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ void *sec_ctx;
+
+ /* Get crypto priv session size */
sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
if (sess_sz > max_sess_sz)
max_sess_sz = sess_sz;
+
+ /*
+ * If crypto device is security capable, need to check the
+ * size of security session as well.
+ */
+
+ /* Get security context of the crypto device */
+ sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ /* Get size of security session */
+ sess_sz = rte_security_session_get_size(sec_ctx);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
}
RTE_ETH_FOREACH_DEV(port_id) {
void *sec_ctx;
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 4ab8e098..d2d3550a 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -81,6 +81,13 @@ const struct supported_cipher_algo cipher_algos[] = {
.iv_len = 8,
.block_size = 16, /* XXX AESNI MB limitation, should be 4 */
.key_len = 20
+ },
+ {
+ .keyword = "3des-cbc",
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .iv_len = 8,
+ .block_size = 8,
+ .key_len = 24
}
};
@@ -327,7 +334,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
if (status->status < 0)
return;
- if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC)
+ if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
+ algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
rule->salt = (uint32_t)rte_rand();
if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
@@ -810,6 +818,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
iv_length = sa->iv_len;
break;
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 331c32e7..4073a490 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -109,8 +109,7 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ .offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -774,7 +773,7 @@ main(int argc, char **argv)
qconf->tx_queue_id[portid] = queueid;
queueid++;
}
-
+ rte_eth_allmulticast_enable(portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
diff --git a/examples/kni/Makefile b/examples/kni/Makefile
index 7e19d2e2..dd90d7d7 100644
--- a/examples/kni/Makefile
+++ b/examples/kni/Makefile
@@ -20,6 +20,7 @@ static: build/$(APP)-static
PC_FILE := $(shell pkg-config --path libdpdk)
CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
@@ -54,6 +55,7 @@ please change the definition of the RTE_TARGET environment variable)
endif
CFLAGS += -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += $(WERROR_FLAGS)
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 81336087..e37b1ad3 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -94,9 +94,6 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
- .rxmode = {
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- },
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
@@ -109,6 +106,8 @@ static struct rte_mempool * pktmbuf_pool = NULL;
static uint32_t ports_mask = 0;
/* Ports set in promiscuous mode off by default. */
static int promiscuous_on = 0;
+/* Monitor link status continually. off by default. */
+static int monitor_links;
/* Structure type for recording kni interface specific stats */
struct kni_interface_stats {
@@ -172,14 +171,13 @@ signal_handler(int signum)
/* When we receive a USR2 signal, reset stats */
if (signum == SIGUSR2) {
memset(&kni_stats, 0, sizeof(kni_stats));
- printf("\n**Statistics have been reset**\n");
+ printf("\n** Statistics have been reset **\n");
return;
}
/* When we receive a RTMIN or SIGINT signal, stop kni processing */
if (signum == SIGRTMIN || signum == SIGINT){
- printf("SIGRTMIN is received, and the KNI processing is "
- "going to stop\n");
+ printf("\nSIGRTMIN/SIGINT received. KNI processing stopping.\n");
rte_atomic32_inc(&kni_stop);
return;
}
@@ -225,7 +223,8 @@ kni_ingress(struct kni_port_params *p)
}
/* Burst tx to kni */
num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
- kni_stats[port_id].rx_packets += num;
+ if (num)
+ kni_stats[port_id].rx_packets += num;
rte_kni_handle_request(p->kni[i]);
if (unlikely(num < nb_rx)) {
@@ -262,7 +261,8 @@ kni_egress(struct kni_port_params *p)
}
/* Burst tx to eth */
nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
- kni_stats[port_id].tx_packets += nb_tx;
+ if (nb_tx)
+ kni_stats[port_id].tx_packets += nb_tx;
if (unlikely(nb_tx < num)) {
/* Free mbufs not tx to NIC */
kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
@@ -328,11 +328,12 @@ main_loop(__rte_unused void *arg)
static void
print_usage(const char *prgname)
{
- RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P "
+ RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P -m "
"[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
"[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
" -p PORTMASK: hex bitmask of ports to use\n"
" -P : enable promiscuous mode\n"
+ " -m : enable monitoring of port carrier state\n"
" --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
"port and lcore configurations\n",
prgname);
@@ -513,7 +514,7 @@ parse_args(int argc, char **argv)
opterr = 0;
/* Parse command line */
- while ((opt = getopt_long(argc, argv, "p:P", longopts,
+ while ((opt = getopt_long(argc, argv, "p:Pm", longopts,
&longindex)) != EOF) {
switch (opt) {
case 'p':
@@ -522,6 +523,9 @@ parse_args(int argc, char **argv)
case 'P':
promiscuous_on = 1;
break;
+ case 'm':
+ monitor_links = 1;
+ break;
case 0:
if (!strncmp(longopts[longindex].name,
CMDLINE_OPT_CONFIG,
@@ -677,6 +681,55 @@ check_all_ports_link_status(uint32_t port_mask)
}
}
+static void
+log_link_state(struct rte_kni *kni, int prev, struct rte_eth_link *link)
+{
+ if (kni == NULL || link == NULL)
+ return;
+
+ if (prev == ETH_LINK_DOWN && link->link_status == ETH_LINK_UP) {
+ RTE_LOG(INFO, APP, "%s NIC Link is Up %d Mbps %s %s.\n",
+ rte_kni_get_name(kni),
+ link->link_speed,
+ link->link_autoneg ? "(AutoNeg)" : "(Fixed)",
+ link->link_duplex ? "Full Duplex" : "Half Duplex");
+ } else if (prev == ETH_LINK_UP && link->link_status == ETH_LINK_DOWN) {
+ RTE_LOG(INFO, APP, "%s NIC Link is Down.\n",
+ rte_kni_get_name(kni));
+ }
+}
+
+/*
+ * Monitor the link status of all ports and update the
+ * corresponding KNI interface(s)
+ */
+static void *
+monitor_all_ports_link_status(void *arg)
+{
+ uint16_t portid;
+ struct rte_eth_link link;
+ unsigned int i;
+ struct kni_port_params **p = kni_port_params_array;
+ int prev;
+ (void) arg;
+
+ while (monitor_links) {
+ rte_delay_ms(500);
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((ports_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(portid, &link);
+ for (i = 0; i < p[portid]->nb_kni; i++) {
+ prev = rte_kni_update_link(p[portid]->kni[i],
+ link.link_status);
+ log_link_state(p[portid]->kni[i], prev, &link);
+ }
+ }
+ }
+ return NULL;
+}
+
/* Callback for request of changing MTU */
static int
kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
@@ -896,6 +949,9 @@ main(int argc, char** argv)
int ret;
uint16_t nb_sys_ports, port;
unsigned i;
+ void *retval;
+ pthread_t kni_link_tid;
+ int pid;
/* Associate signal_handler function with USR signals */
signal(SIGUSR1, signal_handler);
@@ -952,12 +1008,31 @@ main(int argc, char** argv)
}
check_all_ports_link_status(ports_mask);
+ pid = getpid();
+ RTE_LOG(INFO, APP, "========================\n");
+ RTE_LOG(INFO, APP, "KNI Running\n");
+ RTE_LOG(INFO, APP, "kill -SIGUSR1 %d\n", pid);
+ RTE_LOG(INFO, APP, " Show KNI Statistics.\n");
+ RTE_LOG(INFO, APP, "kill -SIGUSR2 %d\n", pid);
+ RTE_LOG(INFO, APP, " Zero KNI Statistics.\n");
+ RTE_LOG(INFO, APP, "========================\n");
+ fflush(stdout);
+
+ ret = rte_ctrl_thread_create(&kni_link_tid,
+ "KNI link status check", NULL,
+ monitor_all_ports_link_status, NULL);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Could not create link status thread!\n");
+
/* Launch per-lcore function on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
RTE_LCORE_FOREACH_SLAVE(i) {
if (rte_eal_wait_lcore(i) < 0)
return -1;
}
+ monitor_links = 0;
+ pthread_join(kni_link_tid, &retval);
/* Release resources */
RTE_ETH_FOREACH_DEV(port) {
diff --git a/examples/kni/meson.build b/examples/kni/meson.build
index 79131639..fd6ae444 100644
--- a/examples/kni/meson.build
+++ b/examples/kni/meson.build
@@ -12,3 +12,4 @@ deps += ['kni', 'bus_pci']
sources = files(
'main.c'
)
+allow_experimental_apis = true
diff --git a/examples/l2fwd-cat/Makefile b/examples/l2fwd-cat/Makefile
index aec770c2..b6eeabde 100644
--- a/examples/l2fwd-cat/Makefile
+++ b/examples/l2fwd-cat/Makefile
@@ -23,7 +23,6 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
-CFLAGS += -D_GNU_SOURCE
LDFLAGS += -lpqos
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
@@ -66,7 +65,6 @@ endif
EXTRA_CFLAGS += -O3 -g -Wfatal-errors
CFLAGS += -I$(PQOS_INSTALL_PATH)/../include
-CFLAGS_cat.o := -D_GNU_SOURCE
LDLIBS += -L$(PQOS_INSTALL_PATH)
LDLIBS += -lpqos
diff --git a/examples/l2fwd-cat/meson.build b/examples/l2fwd-cat/meson.build
index 1234e7b5..4e2777a0 100644
--- a/examples/l2fwd-cat/meson.build
+++ b/examples/l2fwd-cat/meson.build
@@ -9,7 +9,6 @@
pqos = cc.find_library('pqos', required: false)
build = pqos.found()
ext_deps += pqos
-cflags += '-D_GNU_SOURCE'
cflags += '-I/usr/local/include' # assume pqos lib installed in /usr/local
sources = files(
'cat.c', 'l2fwd-cat.c'
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 6061b751..f12fd266 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -213,7 +213,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index af542338..a4d28e17 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -90,7 +90,6 @@ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 2d8b4d1c..0bf2b533 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -81,7 +81,6 @@ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 9bb4c5bc..6c23215a 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -82,7 +82,6 @@ static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 7c063a8d..a322ce4f 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -127,8 +127,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_CHECKSUM),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/l3fwd-power/Makefile b/examples/l3fwd-power/Makefile
index d7e39a34..772ec7ba 100644
--- a/examples/l3fwd-power/Makefile
+++ b/examples/l3fwd-power/Makefile
@@ -23,6 +23,8 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
@@ -54,6 +56,7 @@ please change the definition of the RTE_TARGET environment variable)
all:
else
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index d15cd520..0b3f8fe6 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -43,6 +43,7 @@
#include <rte_timer.h>
#include <rte_power.h>
#include <rte_spinlock.h>
+#include <rte_power_empty_poll.h>
#include "perf_core.h"
#include "main.h"
@@ -55,6 +56,8 @@
/* 100 ms interval */
#define TIMER_NUMBER_PER_SECOND 10
+/* (10ms) */
+#define INTERVALS_PER_SECOND 100
/* 100000 us */
#define SCALING_PERIOD (1000000/TIMER_NUMBER_PER_SECOND)
#define SCALING_DOWN_TIME_RATIO_THRESHOLD 0.25
@@ -117,6 +120,17 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024
+
+/*
+ * These two thresholds were decided on by running the training algorithm on
+ * a 2.5GHz Xeon. These defaults can be overridden by supplying non-zero values
+ * for the med_threshold and high_threshold parameters on the command line.
+ */
+#define EMPTY_POLL_MED_THRESHOLD 350000UL
+#define EMPTY_POLL_HGH_THRESHOLD 580000UL
+
+
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -132,6 +146,14 @@ static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0;
/* NUMA is enabled by default. */
static int numa_on = 1;
+/* emptypoll is disabled by default. */
+static bool empty_poll_on;
+static bool empty_poll_train;
+volatile bool empty_poll_stop;
+static struct ep_params *ep_params;
+static struct ep_policy policy;
+static long ep_med_edpi, ep_hgh_edpi;
+
static int parse_ptype; /**< Parse packet type using rx callback, and */
/**< disabled by default */
@@ -180,8 +202,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_CHECKSUM),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
@@ -331,6 +352,19 @@ static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
static inline enum freq_scale_hint_t power_freq_scaleup_heuristic( \
unsigned int lcore_id, uint16_t port_id, uint16_t queue_id);
+
+/*
+ * These defaults are using the max frequency index (1), a medium index (9)
+ * and a typical low frequency index (14). These can be adjusted to use
+ * different indexes using the relevant command line parameters.
+ */
+static uint8_t freq_tlb[] = {14, 9, 1};
+
+static int is_done(void)
+{
+ return empty_poll_stop;
+}
+
/* exit signal handler */
static void
signal_exit_now(int sigtype)
@@ -340,6 +374,10 @@ signal_exit_now(int sigtype)
int ret;
if (sigtype == SIGINT) {
+ if (empty_poll_on)
+ empty_poll_stop = true;
+
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
@@ -352,16 +390,19 @@ signal_exit_now(int sigtype)
"core%u\n", lcore_id);
}
- RTE_ETH_FOREACH_DEV(portid) {
- if ((enabled_port_mask & (1 << portid)) == 0)
- continue;
+ if (!empty_poll_on) {
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
- rte_eth_dev_stop(portid);
- rte_eth_dev_close(portid);
+ rte_eth_dev_stop(portid);
+ rte_eth_dev_close(portid);
+ }
}
}
- rte_exit(EXIT_SUCCESS, "User forced exit\n");
+ if (!empty_poll_on)
+ rte_exit(EXIT_SUCCESS, "User forced exit\n");
}
/* Freqency scale down timer callback */
@@ -826,7 +867,110 @@ static int event_register(struct lcore_conf *qconf)
return 0;
}
+/* main processing loop */
+static int
+main_empty_poll_loop(__attribute__((unused)) void *dummy)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ unsigned int lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ int i, j, nb_rx;
+ uint8_t queueid;
+ uint16_t portid;
+ struct lcore_conf *qconf;
+ struct lcore_rx_queue *rx_queue;
+
+ const uint64_t drain_tsc =
+ (rte_get_tsc_hz() + US_PER_S - 1) /
+ US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_conf[lcore_id];
+
+ if (qconf->n_rx_queue == 0) {
+ RTE_LOG(INFO, L3FWD_POWER, "lcore %u has nothing to do\n",
+ lcore_id);
+ return 0;
+ }
+
+ for (i = 0; i < qconf->n_rx_queue; i++) {
+ portid = qconf->rx_queue_list[i].port_id;
+ queueid = qconf->rx_queue_list[i].queue_id;
+ RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
+ "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ }
+
+ while (!is_done()) {
+ stats[lcore_id].nb_iteration_looped++;
+
+ cur_tsc = rte_rdtsc();
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_tx_port; ++i) {
+ portid = qconf->tx_port_id[i];
+ rte_eth_tx_buffer_flush(portid,
+ qconf->tx_queue_id[portid],
+ qconf->tx_buffer[portid]);
+ }
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_queue; ++i) {
+ rx_queue = &(qconf->rx_queue_list[i]);
+ rx_queue->idle_hint = 0;
+ portid = rx_queue->port_id;
+ queueid = rx_queue->queue_id;
+
+ nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
+ MAX_PKT_BURST);
+
+ stats[lcore_id].nb_rx_processed += nb_rx;
+
+ if (nb_rx == 0) {
+
+ rte_power_empty_poll_stat_update(lcore_id);
+
+ continue;
+ } else {
+ rte_power_poll_stat_update(lcore_id, nb_rx);
+ }
+
+ /* Prefetch first packets */
+ for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[j], void *));
+ }
+
+ /* Prefetch and forward already prefetched packets */
+ for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+ j + PREFETCH_OFFSET],
+ void *));
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; j < nb_rx; j++) {
+ l3fwd_simple_forward(pkts_burst[j], portid,
+ qconf);
+ }
+
+ }
+
+ }
+
+ return 0;
+}
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
@@ -1128,7 +1272,9 @@ print_usage(const char *prgname)
" --no-numa: optional, disable numa awareness\n"
" --enable-jumbo: enable jumbo frame"
" which max packet len is PKTLEN in decimal (64-9600)\n"
- " --parse-ptype: parse packet type by software\n",
+ " --parse-ptype: parse packet type by software\n"
+ " --empty-poll: enable empty poll detection"
+ " follow (training_flag, high_threshold, med_threshold)\n",
prgname);
}
@@ -1221,7 +1367,55 @@ parse_config(const char *q_arg)
return 0;
}
+static int
+parse_ep_config(const char *q_arg)
+{
+ char s[256];
+ const char *p = q_arg;
+ char *end;
+ int num_arg;
+
+ char *str_fld[3];
+
+ int training_flag;
+ int med_edpi;
+ int hgh_edpi;
+
+ ep_med_edpi = EMPTY_POLL_MED_THRESHOLD;
+ ep_hgh_edpi = EMPTY_POLL_HGH_THRESHOLD;
+
+ snprintf(s, sizeof(s), "%s", p);
+
+ num_arg = rte_strsplit(s, sizeof(s), str_fld, 3, ',');
+
+ empty_poll_train = false;
+
+ if (num_arg == 0)
+ return 0;
+
+ if (num_arg == 3) {
+
+ training_flag = strtoul(str_fld[0], &end, 0);
+ med_edpi = strtoul(str_fld[1], &end, 0);
+ hgh_edpi = strtoul(str_fld[2], &end, 0);
+
+ if (training_flag == 1)
+ empty_poll_train = true;
+
+ if (med_edpi > 0)
+ ep_med_edpi = med_edpi;
+
+ if (hgh_edpi > 0)
+ ep_hgh_edpi = hgh_edpi;
+
+ } else {
+
+ return -1;
+ }
+
+ return 0;
+}
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
/* Parse the argument given in the command line of the application */
@@ -1231,6 +1425,7 @@ parse_args(int argc, char **argv)
int opt, ret;
char **argvopt;
int option_index;
+ uint32_t limit;
char *prgname = argv[0];
static struct option lgopts[] = {
{"config", 1, 0, 0},
@@ -1238,13 +1433,14 @@ parse_args(int argc, char **argv)
{"high-perf-cores", 1, 0, 0},
{"no-numa", 0, 0, 0},
{"enable-jumbo", 0, 0, 0},
+ {"empty-poll", 1, 0, 0},
{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{NULL, 0, 0, 0}
};
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "p:P",
+ while ((opt = getopt_long(argc, argvopt, "p:l:m:h:P",
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -1261,7 +1457,18 @@ parse_args(int argc, char **argv)
printf("Promiscuous mode selected\n");
promiscuous_on = 1;
break;
-
+ case 'l':
+ limit = parse_max_pkt_len(optarg);
+ freq_tlb[LOW] = limit;
+ break;
+ case 'm':
+ limit = parse_max_pkt_len(optarg);
+ freq_tlb[MED] = limit;
+ break;
+ case 'h':
+ limit = parse_max_pkt_len(optarg);
+ freq_tlb[HGH] = limit;
+ break;
/* long options */
case 0:
if (!strncmp(lgopts[option_index].name, "config", 6)) {
@@ -1300,6 +1507,20 @@ parse_args(int argc, char **argv)
}
if (!strncmp(lgopts[option_index].name,
+ "empty-poll", 10)) {
+ printf("empty-poll is enabled\n");
+ empty_poll_on = true;
+ ret = parse_ep_config(optarg);
+
+ if (ret) {
+ printf("invalid empty poll config\n");
+ print_usage(prgname);
+ return -1;
+ }
+
+ }
+
+ if (!strncmp(lgopts[option_index].name,
"enable-jumbo", 12)) {
struct option lenopts =
{"max-pkt-len", required_argument, \
@@ -1647,6 +1868,59 @@ init_power_library(void)
}
return ret;
}
+static void
+empty_poll_setup_timer(void)
+{
+ int lcore_id = rte_lcore_id();
+ uint64_t hz = rte_get_timer_hz();
+
+ struct ep_params *ep_ptr = ep_params;
+
+ ep_ptr->interval_ticks = hz / INTERVALS_PER_SECOND;
+
+ rte_timer_reset_sync(&ep_ptr->timer0,
+ ep_ptr->interval_ticks,
+ PERIODICAL,
+ lcore_id,
+ rte_empty_poll_detection,
+ (void *)ep_ptr);
+
+}
+static int
+launch_timer(unsigned int lcore_id)
+{
+ int64_t prev_tsc = 0, cur_tsc, diff_tsc, cycles_10ms;
+
+ RTE_SET_USED(lcore_id);
+
+
+ if (rte_get_master_lcore() != lcore_id) {
+ rte_panic("timer on lcore:%d which is not master core:%d\n",
+ lcore_id,
+ rte_get_master_lcore());
+ }
+
+ RTE_LOG(INFO, POWER, "Bring up the Timer\n");
+
+ empty_poll_setup_timer();
+
+ cycles_10ms = rte_get_timer_hz() / 100;
+
+ while (!is_done()) {
+ cur_tsc = rte_rdtsc();
+ diff_tsc = cur_tsc - prev_tsc;
+ if (diff_tsc > cycles_10ms) {
+ rte_timer_manage();
+ prev_tsc = cur_tsc;
+ cycles_10ms = rte_get_timer_hz() / 100;
+ }
+ }
+
+ RTE_LOG(INFO, POWER, "Timer_subsystem is done\n");
+
+ return 0;
+}
+
int
main(int argc, char **argv)
@@ -1829,13 +2103,15 @@ main(int argc, char **argv)
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
- /* init timer structures for each enabled lcore */
- rte_timer_init(&power_timers[lcore_id]);
- hz = rte_get_timer_hz();
- rte_timer_reset(&power_timers[lcore_id],
- hz/TIMER_NUMBER_PER_SECOND, SINGLE, lcore_id,
- power_timer_cb, NULL);
-
+ if (empty_poll_on == false) {
+ /* init timer structures for each enabled lcore */
+ rte_timer_init(&power_timers[lcore_id]);
+ hz = rte_get_timer_hz();
+ rte_timer_reset(&power_timers[lcore_id],
+ hz/TIMER_NUMBER_PER_SECOND,
+ SINGLE, lcore_id,
+ power_timer_cb, NULL);
+ }
qconf = &lcore_conf[lcore_id];
printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
fflush(stdout);
@@ -1906,12 +2182,43 @@ main(int argc, char **argv)
check_all_ports_link_status(enabled_port_mask);
+ if (empty_poll_on == true) {
+
+ if (empty_poll_train) {
+ policy.state = TRAINING;
+ } else {
+ policy.state = MED_NORMAL;
+ policy.med_base_edpi = ep_med_edpi;
+ policy.hgh_base_edpi = ep_hgh_edpi;
+ }
+
+ ret = rte_power_empty_poll_stat_init(&ep_params,
+ freq_tlb,
+ &policy);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "empty poll init failed");
+ }
+
+
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+ if (empty_poll_on == false) {
+ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+ } else {
+ empty_poll_stop = false;
+ rte_eal_mp_remote_launch(main_empty_poll_loop, NULL,
+ SKIP_MASTER);
+ }
+
+ if (empty_poll_on == true)
+ launch_timer(rte_lcore_id());
+
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
+ if (empty_poll_on)
+ rte_power_empty_poll_stat_free();
+
return 0;
}
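
Note on the new option wired in above: the empty-poll branch replaces the per-lcore frequency-scaling timers with the librte_power empty-poll state machine, so the app runs in either the legacy mode or the empty-poll mode, never both. Purely as an illustration (binary name, core/port layout and threshold values are assumptions, not taken from this patch), an invocation with empty poll enabled and training disabled could look like:

    ./build/l3fwd-power -l 1,2 -n 4 -- -p 0x1 -P \
            --config="(0,0,2)" \
            --empty-poll="0,350000,580000"

Passing "1,0,0" instead runs the training phase and derives the thresholds at startup; passing zeros for the thresholds falls back to the EMPTY_POLL_MED_THRESHOLD and EMPTY_POLL_HGH_THRESHOLD defaults defined earlier in this file. The master lcore then drives the detection timer via launch_timer() while the remaining lcores run main_empty_poll_loop().
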
diff --git a/examples/l3fwd-power/meson.build b/examples/l3fwd-power/meson.build
index 20c80543..a3c5c2f1 100644
--- a/examples/l3fwd-power/meson.build
+++ b/examples/l3fwd-power/meson.build
@@ -9,6 +9,7 @@
if host_machine.system() != 'linux'
build = false
endif
+allow_experimental_apis = true
deps += ['power', 'timer', 'lpm', 'hash']
sources = files(
'main.c', 'perf_core.c'
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index 5edd91a7..41137f97 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -161,8 +161,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_CHECKSUM),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index ab019b9e..e4b99efe 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -120,8 +120,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_CHECKSUM),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 3b732076..f3346d23 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -79,7 +79,6 @@ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/load_balancer/Makefile b/examples/load_balancer/Makefile
index fc8df71e..197b019d 100644
--- a/examples/load_balancer/Makefile
+++ b/examples/load_balancer/Makefile
@@ -50,7 +50,6 @@ include $(RTE_SDK)/mk/rte.vars.mk
CFLAGS += -O3 -g
CFLAGS += $(WERROR_FLAGS)
-CFLAGS_config.o := -D_GNU_SOURCE
# workaround for a gcc bug with noreturn attribute
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
index f2045f23..3ab7d021 100644
--- a/examples/load_balancer/init.c
+++ b/examples/load_balancer/init.c
@@ -45,8 +45,7 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/meson.build b/examples/meson.build
index 4ee7a111..af81c762 100644
--- a/examples/meson.build
+++ b/examples/meson.build
@@ -22,6 +22,10 @@ default_cflags = machine_args
if cc.has_argument('-Wno-format-truncation')
default_cflags += '-Wno-format-truncation'
endif
+
+# specify -D_GNU_SOURCE unconditionally
+default_cflags += '-D_GNU_SOURCE'
+
foreach example: examples
name = example
build = true
diff --git a/examples/multi_process/Makefile b/examples/multi_process/Makefile
index a6708b7e..b76b02fc 100644
--- a/examples/multi_process/Makefile
+++ b/examples/multi_process/Makefile
@@ -13,5 +13,6 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += client_server_mp
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += simple_mp
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += symmetric_mp
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += hotplug_mp
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/multi_process/hotplug_mp/Makefile b/examples/multi_process/hotplug_mp/Makefile
new file mode 100644
index 00000000..bc36aeae
--- /dev/null
+++ b/examples/multi_process/hotplug_mp/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = hotplug_mp
+
+# all source are stored in SRCS-y
+SRCS-y := main.c commands.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/multi_process/hotplug_mp/commands.c b/examples/multi_process/hotplug_mp/commands.c
new file mode 100644
index 00000000..b0685939
--- /dev/null
+++ b/examples/multi_process/hotplug_mp/commands.c
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline.h>
+#include <rte_ethdev.h>
+
+/**********************************************************/
+
+struct cmd_help_result {
+ cmdline_fixed_string_t help;
+};
+
+static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ cmdline_printf(cl,
+ "commands:\n"
+ "- attach <devargs>\n"
+ "- detach <devargs>\n"
+ "- list\n\n");
+}
+
+cmdline_parse_token_string_t cmd_help_help =
+ TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
+
+cmdline_parse_inst_t cmd_help = {
+ .f = cmd_help_parsed, /* function to call */
+ .data = NULL, /* 2nd arg of func */
+ .help_str = "show help",
+ .tokens = { /* token list, NULL terminated */
+ (void *)&cmd_help_help,
+ NULL,
+ },
+};
+
+/**********************************************************/
+
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ cmdline_quit(cl);
+}
+
+cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed, /* function to call */
+ .data = NULL, /* 2nd arg of func */
+ .help_str = "quit",
+ .tokens = { /* token list, NULL terminated */
+ (void *)&cmd_quit_quit,
+ NULL,
+ },
+};
+
+/**********************************************************/
+
+struct cmd_list_result {
+ cmdline_fixed_string_t list;
+};
+
+static void cmd_list_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ uint16_t port_id;
+ char dev_name[RTE_DEV_NAME_MAX_LEN];
+
+ cmdline_printf(cl, "list all etherdev\n");
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ rte_eth_dev_get_name_by_port(port_id, dev_name);
+ if (strlen(dev_name) > 0)
+ cmdline_printf(cl, "%d\t%s\n", port_id, dev_name);
+ else
+ printf("empty dev_name is not expected!\n");
+ }
+}
+
+cmdline_parse_token_string_t cmd_list_list =
+ TOKEN_STRING_INITIALIZER(struct cmd_list_result, list, "list");
+
+cmdline_parse_inst_t cmd_list = {
+ .f = cmd_list_parsed, /* function to call */
+ .data = NULL, /* 2nd arg of func */
+ .help_str = "list all devices",
+ .tokens = { /* token list, NULL terminated */
+ (void *)&cmd_list_list,
+ NULL,
+ },
+};
+
+/**********************************************************/
+
+struct cmd_dev_attach_result {
+ cmdline_fixed_string_t attach;
+ cmdline_fixed_string_t devargs;
+};
+
+static void cmd_dev_attach_parsed(void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_dev_attach_result *res = parsed_result;
+ struct rte_devargs da;
+
+ memset(&da, 0, sizeof(da));
+
+ if (rte_devargs_parsef(&da, "%s", res->devargs)) {
+ cmdline_printf(cl, "cannot parse devargs\n");
+ if (da.args)
+ free(da.args);
+ return;
+ }
+
+ if (!rte_eal_hotplug_add(da.bus->name, da.name, da.args))
+ cmdline_printf(cl, "attached device %s\n", da.name);
+ else
+ cmdline_printf(cl, "failed to attached device %s\n",
+ da.name);
+}
+
+cmdline_parse_token_string_t cmd_dev_attach_attach =
+ TOKEN_STRING_INITIALIZER(struct cmd_dev_attach_result, attach,
+ "attach");
+cmdline_parse_token_string_t cmd_dev_attach_devargs =
+ TOKEN_STRING_INITIALIZER(struct cmd_dev_attach_result, devargs, NULL);
+
+cmdline_parse_inst_t cmd_attach_device = {
+ .f = cmd_dev_attach_parsed, /* function to call */
+ .data = NULL, /* 2nd arg of func */
+ .help_str = "attach a device",
+ .tokens = { /* token list, NULL terminated */
+ (void *)&cmd_dev_attach_attach,
+ (void *)&cmd_dev_attach_devargs,
+ NULL,
+ },
+};
+
+/**********************************************************/
+
+struct cmd_dev_detach_result {
+ cmdline_fixed_string_t detach;
+ cmdline_fixed_string_t devargs;
+};
+
+static void cmd_dev_detach_parsed(void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_dev_detach_result *res = parsed_result;
+ struct rte_devargs da;
+
+ memset(&da, 0, sizeof(da));
+
+ if (rte_devargs_parsef(&da, "%s", res->devargs)) {
+ cmdline_printf(cl, "cannot parse devargs\n");
+ if (da.args)
+ free(da.args);
+ return;
+ }
+
+ printf("detaching...\n");
+ if (!rte_eal_hotplug_remove(da.bus->name, da.name))
+ cmdline_printf(cl, "detached device %s\n",
+ da.name);
+ else
+ cmdline_printf(cl, "failed to dettach device %s\n",
+ da.name);
+}
+
+cmdline_parse_token_string_t cmd_dev_detach_detach =
+ TOKEN_STRING_INITIALIZER(struct cmd_dev_detach_result, detach,
+ "detach");
+
+cmdline_parse_token_string_t cmd_dev_detach_devargs =
+ TOKEN_STRING_INITIALIZER(struct cmd_dev_detach_result, devargs, NULL);
+
+cmdline_parse_inst_t cmd_detach_device = {
+ .f = cmd_dev_detach_parsed, /* function to call */
+ .data = NULL, /* 2nd arg of func */
+ .help_str = "detach a device",
+ .tokens = { /* token list, NULL terminated */
+ (void *)&cmd_dev_detach_detach,
+ (void *)&cmd_dev_detach_devargs,
+ NULL,
+ },
+};
+
+/**********************************************************/
+/**********************************************************/
+/****** CONTEXT (list of instructions) */
+
+cmdline_parse_ctx_t main_ctx[] = {
+ (cmdline_parse_inst_t *)&cmd_help,
+ (cmdline_parse_inst_t *)&cmd_quit,
+ (cmdline_parse_inst_t *)&cmd_list,
+ (cmdline_parse_inst_t *)&cmd_attach_device,
+ (cmdline_parse_inst_t *)&cmd_detach_device,
+ NULL,
+};
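
For orientation only: the commands registered above drive EAL hotplug through rte_devargs_parsef() plus rte_eal_hotplug_add()/rte_eal_hotplug_remove(), so any devargs string the EAL can parse is accepted. A hypothetical interactive session (the null vdev name and the port id are illustrative, not part of this patch) might look like:

    ./build/hotplug_mp --proc-type=auto
    example> attach net_null0
    attached device net_null0
    example> list
    list all etherdev
    0       net_null0
    example> detach net_null0
    detached device net_null0
    example> quit

The intent of this example is that an attach or detach issued in one process is propagated over the EAL multi-process channel, so the device list of peer primary/secondary processes stays in sync.
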
diff --git a/examples/multi_process/hotplug_mp/commands.h b/examples/multi_process/hotplug_mp/commands.h
new file mode 100644
index 00000000..afcf177d
--- /dev/null
+++ b/examples/multi_process/hotplug_mp/commands.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _COMMANDS_H_
+#define _COMMANDS_H_
+
+extern cmdline_parse_ctx_t main_ctx[];
+
+#endif /* _COMMANDS_H_ */
diff --git a/examples/multi_process/hotplug_mp/main.c b/examples/multi_process/hotplug_mp/main.c
new file mode 100644
index 00000000..d6685807
--- /dev/null
+++ b/examples/multi_process/hotplug_mp/main.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <termios.h>
+#include <sys/queue.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_debug.h>
+
+#include "commands.h"
+
+int main(int argc, char **argv)
+{
+ int ret;
+ struct cmdline *cl;
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("Cannot init EAL\n");
+
+ cl = cmdline_stdin_new(main_ctx, "example> ");
+ if (cl == NULL)
+ rte_panic("Cannot create cmdline instance\n");
+ cmdline_interact(cl);
+ cmdline_stdin_exit(cl);
+
+ rte_eal_cleanup();
+
+ return 0;
+}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index c6c6a537..c310e942 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -178,8 +178,7 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/netmap_compat/bridge/bridge.c b/examples/netmap_compat/bridge/bridge.c
index 7afca28c..216e0105 100644
--- a/examples/netmap_compat/bridge/bridge.c
+++ b/examples/netmap_compat/bridge/bridge.c
@@ -26,7 +26,6 @@
struct rte_eth_conf eth_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 5392fcea..4f8747bc 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -2,8 +2,6 @@
* Copyright(c) 2010-2016 Intel Corporation
*/
-#define _GNU_SOURCE
-
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
@@ -306,8 +304,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c
index 7d0d5811..03ff3943 100644
--- a/examples/performance-thread/pthread_shim/main.c
+++ b/examples/performance-thread/pthread_shim/main.c
@@ -2,7 +2,6 @@
* Copyright(c) 2015 Intel Corporation
*/
-#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.c b/examples/performance-thread/pthread_shim/pthread_shim.c
index 53f12437..a02de067 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.c
+++ b/examples/performance-thread/pthread_shim/pthread_shim.c
@@ -6,7 +6,6 @@
#include <stdlib.h>
#include <sys/types.h>
#include <errno.h>
-#define __USE_GNU
#include <sched.h>
#include <dlfcn.h>
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index 5cf4e9df..9b011244 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -56,8 +56,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/qos_sched/Makefile b/examples/qos_sched/Makefile
index a7ecf978..45b0a9eb 100644
--- a/examples/qos_sched/Makefile
+++ b/examples/qos_sched/Makefile
@@ -57,8 +57,6 @@ else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-CFLAGS_args.o := -D_GNU_SOURCE
-CFLAGS_cfg_file.o := -D_GNU_SOURCE
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 94cbb26f..37c2b95f 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -59,7 +59,6 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index 19164385..5a0f64f4 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -24,7 +24,6 @@
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
diff --git a/examples/service_cores/main.c b/examples/service_cores/main.c
index 2cd57290..c7c79281 100644
--- a/examples/service_cores/main.c
+++ b/examples/service_cores/main.c
@@ -118,6 +118,12 @@ apply_profile(int profile_id)
struct profile *p = &profiles[profile_id];
const uint8_t core_off = 1;
+ if (p->num_cores > rte_lcore_count() + 1) {
+ printf("insufficent cores to run (%s)",
+ p->name);
+ return;
+ }
+
for (i = 0; i < p->num_cores; i++) {
uint32_t core = i + core_off;
ret = rte_service_lcore_add(core);
diff --git a/examples/tep_termination/Makefile b/examples/tep_termination/Makefile
index 8ec1a38e..4c156432 100644
--- a/examples/tep_termination/Makefile
+++ b/examples/tep_termination/Makefile
@@ -60,7 +60,6 @@ endif
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D_GNU_SOURCE
include $(RTE_SDK)/mk/rte.extapp.mk
endif
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index b99ab97d..ad7fbe9c 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -69,7 +69,6 @@ uint8_t tep_filter_type[] = {RTE_TUNNEL_FILTER_IMAC_TENID,
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/vdpa/Makefile b/examples/vdpa/Makefile
new file mode 100644
index 00000000..42672a2b
--- /dev/null
+++ b/examples/vdpa/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(info This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+else
+
+# binary name
+APP = vdpa
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -D_GNU_SOURCE
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+
+endif
diff --git a/examples/vdpa/main.c b/examples/vdpa/main.c
new file mode 100644
index 00000000..d2e2cb7c
--- /dev/null
+++ b/examples/vdpa/main.c
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <getopt.h>
+#include <signal.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_socket.h>
+#include <cmdline_parse_string.h>
+#include <cmdline.h>
+
+#define MAX_PATH_LEN 128
+#define MAX_VDPA_SAMPLE_PORTS 1024
+#define RTE_LOGTYPE_VDPA RTE_LOGTYPE_USER1
+
+struct vdpa_port {
+ char ifname[MAX_PATH_LEN];
+ int did;
+ int vid;
+ uint64_t flags;
+};
+
+static struct vdpa_port vports[MAX_VDPA_SAMPLE_PORTS];
+
+static char iface[MAX_PATH_LEN];
+static int dev_total;
+static int devcnt;
+static int interactive;
+static int client_mode;
+
+/* display usage */
+static void
+vdpa_usage(const char *prgname)
+{
+ printf("Usage: %s [EAL options] -- "
+ " --interactive|-i: run in interactive mode.\n"
+ " --iface <path>: specify the path prefix of the socket files, e.g. /tmp/vhost-user-.\n"
+ " --client: register a vhost-user socket as client mode.\n",
+ prgname);
+}
+
+static int
+parse_args(int argc, char **argv)
+{
+ static const char *short_option = "i";
+ static struct option long_option[] = {
+ {"iface", required_argument, NULL, 0},
+ {"interactive", no_argument, &interactive, 1},
+ {"client", no_argument, &client_mode, 1},
+ {NULL, 0, 0, 0},
+ };
+ int opt, idx;
+ char *prgname = argv[0];
+
+ while ((opt = getopt_long(argc, argv, short_option, long_option, &idx))
+ != EOF) {
+ switch (opt) {
+ case 'i':
+ printf("Interactive-mode selected\n");
+ interactive = 1;
+ break;
+ /* long options */
+ case 0:
+ if (strncmp(long_option[idx].name, "iface",
+ MAX_PATH_LEN) == 0) {
+ rte_strscpy(iface, optarg, MAX_PATH_LEN);
+ printf("iface %s\n", iface);
+ }
+ if (!strcmp(long_option[idx].name, "interactive")) {
+ printf("Interactive-mode selected\n");
+ interactive = 1;
+ }
+ break;
+
+ default:
+ vdpa_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (iface[0] == '\0' && interactive == 0) {
+ vdpa_usage(prgname);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+new_device(int vid)
+{
+ char ifname[MAX_PATH_LEN];
+ int i;
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ for (i = 0; i < MAX_VDPA_SAMPLE_PORTS; i++) {
+ if (strncmp(ifname, vports[i].ifname, MAX_PATH_LEN) == 0) {
+ printf("\nnew port %s, did: %d\n",
+ ifname, vports[i].did);
+ vports[i].vid = vid;
+ break;
+ }
+ }
+
+ if (i >= MAX_VDPA_SAMPLE_PORTS)
+ return -1;
+
+ return 0;
+}
+
+static void
+destroy_device(int vid)
+{
+ char ifname[MAX_PATH_LEN];
+ int i;
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ for (i = 0; i < MAX_VDPA_SAMPLE_PORTS; i++) {
+ if (strcmp(ifname, vports[i].ifname) == 0) {
+ printf("\ndestroy port %s, did: %d\n",
+ ifname, vports[i].did);
+ break;
+ }
+ }
+}
+
+static const struct vhost_device_ops vdpa_sample_devops = {
+ .new_device = new_device,
+ .destroy_device = destroy_device,
+};
+
+static int
+start_vdpa(struct vdpa_port *vport)
+{
+ int ret;
+ char *socket_path = vport->ifname;
+ int did = vport->did;
+
+ if (client_mode)
+ vport->flags |= RTE_VHOST_USER_CLIENT;
+
+ if (access(socket_path, F_OK) != -1 && !client_mode) {
+ RTE_LOG(ERR, VDPA,
+ "%s exists, please remove it or specify another file and try again.\n",
+ socket_path);
+ return -1;
+ }
+ ret = rte_vhost_driver_register(socket_path, vport->flags);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "register driver failed: %s\n",
+ socket_path);
+
+ ret = rte_vhost_driver_callback_register(socket_path,
+ &vdpa_sample_devops);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "register driver ops failed: %s\n",
+ socket_path);
+
+ ret = rte_vhost_driver_attach_vdpa_device(socket_path, did);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "attach vdpa device failed: %s\n",
+ socket_path);
+
+ if (rte_vhost_driver_start(socket_path) < 0)
+ rte_exit(EXIT_FAILURE,
+ "start vhost driver failed: %s\n",
+ socket_path);
+ return 0;
+}
+
+static void
+close_vdpa(struct vdpa_port *vport)
+{
+ int ret;
+ char *socket_path = vport->ifname;
+
+ ret = rte_vhost_driver_detach_vdpa_device(socket_path);
+ if (ret != 0)
+ RTE_LOG(ERR, VDPA,
+ "detach vdpa device failed: %s\n",
+ socket_path);
+
+ ret = rte_vhost_driver_unregister(socket_path);
+ if (ret != 0)
+ RTE_LOG(ERR, VDPA,
+ "Fail to unregister vhost driver for %s.\n",
+ socket_path);
+}
+
+static void
+vdpa_sample_quit(void)
+{
+ int i;
+ for (i = 0; i < RTE_MIN(MAX_VDPA_SAMPLE_PORTS, dev_total); i++) {
+ if (vports[i].ifname[0] != '\0')
+ close_vdpa(&vports[i]);
+ }
+}
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\nSignal %d received, preparing to exit...\n", signum);
+ vdpa_sample_quit();
+ exit(0);
+ }
+}
+
+/* interactive cmds */
+
+/* *** Help command with introduction. *** */
+struct cmd_help_result {
+ cmdline_fixed_string_t help;
+};
+
+static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ cmdline_printf(
+ cl,
+ "\n"
+ "The following commands are currently available:\n\n"
+ "Control:\n"
+ " help : Show interactive instructions.\n"
+ " list : list all available vdpa devices.\n"
+ " create <socket file> <vdev addr> : create a new vdpa port.\n"
+ " quit : exit vdpa sample app.\n"
+ );
+}
+
+cmdline_parse_token_string_t cmd_help_help =
+ TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
+
+cmdline_parse_inst_t cmd_help = {
+ .f = cmd_help_parsed,
+ .data = NULL,
+ .help_str = "show help",
+ .tokens = {
+ (void *)&cmd_help_help,
+ NULL,
+ },
+};
+
+/* *** List all available vdpa devices *** */
+struct cmd_list_result {
+ cmdline_fixed_string_t action;
+};
+
+static void cmd_list_vdpa_devices_parsed(
+ __attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int did;
+ uint32_t queue_num;
+ uint64_t features;
+ struct rte_vdpa_device *vdev;
+ struct rte_pci_addr addr;
+
+ cmdline_printf(cl, "device id\tdevice address\tqueue num\tsupported features\n");
+ for (did = 0; did < dev_total; did++) {
+ vdev = rte_vdpa_get_device(did);
+ if (!vdev)
+ continue;
+ if (vdev->ops->get_queue_num(did, &queue_num) < 0) {
+ RTE_LOG(ERR, VDPA,
+ "failed to get vdpa queue number "
+ "for device id %d.\n", did);
+ continue;
+ }
+ if (vdev->ops->get_features(did, &features) < 0) {
+ RTE_LOG(ERR, VDPA,
+ "failed to get vdpa features "
+ "for device id %d.\n", did);
+ continue;
+ }
+ addr = vdev->addr.pci_addr;
+ cmdline_printf(cl,
+ "%d\t\t" PCI_PRI_FMT "\t%" PRIu32 "\t\t0x%" PRIx64 "\n",
+ did, addr.domain, addr.bus, addr.devid,
+ addr.function, queue_num, features);
+ }
+}
+
+cmdline_parse_token_string_t cmd_action_list =
+ TOKEN_STRING_INITIALIZER(struct cmd_list_result, action, "list");
+
+cmdline_parse_inst_t cmd_list_vdpa_devices = {
+ .f = cmd_list_vdpa_devices_parsed,
+ .data = NULL,
+ .help_str = "list all available vdpa devices",
+ .tokens = {
+ (void *)&cmd_action_list,
+ NULL,
+ },
+};
+
+/* *** Create new vdpa port *** */
+struct cmd_create_result {
+ cmdline_fixed_string_t action;
+ cmdline_fixed_string_t socket_path;
+ cmdline_fixed_string_t bdf;
+};
+
+static void cmd_create_vdpa_port_parsed(void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int did;
+ struct cmd_create_result *res = parsed_result;
+ struct rte_vdpa_dev_addr addr;
+
+ rte_strscpy(vports[devcnt].ifname, res->socket_path, MAX_PATH_LEN);
+ if (rte_pci_addr_parse(res->bdf, &addr.pci_addr) != 0) {
+ cmdline_printf(cl, "Unable to parse the given bdf.\n");
+ return;
+ }
+ addr.type = PCI_ADDR;
+ did = rte_vdpa_find_device_id(&addr);
+ if (did < 0) {
+ cmdline_printf(cl, "Unable to find vdpa device id.\n");
+ return;
+ }
+
+ vports[devcnt].did = did;
+
+ if (start_vdpa(&vports[devcnt]) == 0)
+ devcnt++;
+}
+
+cmdline_parse_token_string_t cmd_action_create =
+ TOKEN_STRING_INITIALIZER(struct cmd_create_result, action, "create");
+cmdline_parse_token_string_t cmd_socket_path =
+ TOKEN_STRING_INITIALIZER(struct cmd_create_result, socket_path, NULL);
+cmdline_parse_token_string_t cmd_bdf =
+ TOKEN_STRING_INITIALIZER(struct cmd_create_result, bdf, NULL);
+
+cmdline_parse_inst_t cmd_create_vdpa_port = {
+ .f = cmd_create_vdpa_port_parsed,
+ .data = NULL,
+ .help_str = "create a new vdpa port",
+ .tokens = {
+ (void *)&cmd_action_create,
+ (void *)&cmd_socket_path,
+ (void *)&cmd_bdf,
+ NULL,
+ },
+};
+
+/* *** QUIT *** */
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ vdpa_sample_quit();
+ cmdline_quit(cl);
+}
+
+cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed,
+ .data = NULL,
+ .help_str = "quit: exit application",
+ .tokens = {
+ (void *)&cmd_quit_quit,
+ NULL,
+ },
+};
+cmdline_parse_ctx_t main_ctx[] = {
+ (cmdline_parse_inst_t *)&cmd_help,
+ (cmdline_parse_inst_t *)&cmd_list_vdpa_devices,
+ (cmdline_parse_inst_t *)&cmd_create_vdpa_port,
+ (cmdline_parse_inst_t *)&cmd_quit,
+ NULL,
+};
+
+int
+main(int argc, char *argv[])
+{
+ char ch;
+ int i;
+ int ret;
+ struct cmdline *cl;
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "eal init failed\n");
+ argc -= ret;
+ argv += ret;
+
+ dev_total = rte_vdpa_get_device_num();
+ if (dev_total <= 0)
+ rte_exit(EXIT_FAILURE, "No available vdpa device found\n");
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "invalid argument\n");
+
+ if (interactive == 1) {
+ cl = cmdline_stdin_new(main_ctx, "vdpa> ");
+ if (cl == NULL)
+ rte_panic("Cannot create cmdline instance\n");
+ cmdline_interact(cl);
+ cmdline_stdin_exit(cl);
+ } else {
+ for (i = 0; i < RTE_MIN(MAX_VDPA_SAMPLE_PORTS, dev_total);
+ i++) {
+ vports[i].did = i;
+ snprintf(vports[i].ifname, MAX_PATH_LEN, "%s%d",
+ iface, i);
+
+ start_vdpa(&vports[i]);
+ }
+
+ printf("enter \'q\' to quit\n");
+ while (scanf("%c", &ch)) {
+ if (ch == 'q')
+ break;
+ while (ch != '\n') {
+ if (scanf("%c", &ch))
+ printf("%c", ch);
+ }
+ printf("enter \'q\' to quit\n");
+ }
+ vdpa_sample_quit();
+ }
+
+ return 0;
+}
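
As a usage sketch only (the PCI address and socket path are placeholders, and a vDPA-capable device must already be probed by the EAL), the interactive mode added above could be exercised like this:

    ./build/vdpa -c 0x2 -n 4 -- --interactive
    vdpa> list
    vdpa> create /tmp/vdpa-sample0 0000:06:00.3
    vdpa> quit

In non-interactive mode the loop at the bottom of main() auto-creates one vhost-user socket per detected vDPA device from the --iface prefix, e.g. --iface /tmp/vdpa-sample yields /tmp/vdpa-sample0, /tmp/vdpa-sample1, and so on.
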
diff --git a/examples/vdpa/meson.build b/examples/vdpa/meson.build
new file mode 100644
index 00000000..2e38a069
--- /dev/null
+++ b/examples/vdpa/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += 'vhost'
+allow_experimental_apis = true
+sources = files(
+ 'main.c'
)
\ No newline at end of file
diff --git a/examples/vhost/Makefile b/examples/vhost/Makefile
index a2ea97a0..c6964381 100644
--- a/examples/vhost/Makefile
+++ b/examples/vhost/Makefile
@@ -61,7 +61,6 @@ else
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D_GNU_SOURCE
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 2175c118..dc9ea101 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -58,9 +58,6 @@
/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
-
/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
@@ -121,8 +118,7 @@ static struct rte_eth_conf vmdq_conf_default = {
* this fixes a bug where ipv4 forwarding in the guest can't
* forward packets from one virtio dev to another virtio dev.
*/
- .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_VLAN_STRIP),
+ .offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
},
.txmode = {
diff --git a/examples/vhost_crypto/Makefile b/examples/vhost_crypto/Makefile
index 83d33101..a620abf4 100644
--- a/examples/vhost_crypto/Makefile
+++ b/examples/vhost_crypto/Makefile
@@ -25,7 +25,6 @@ SRCS-y := main.c
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D_GNU_SOURCE
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/vhost_crypto/main.c b/examples/vhost_crypto/main.c
index f334d712..cbb5e49d 100644
--- a/examples/vhost_crypto/main.c
+++ b/examples/vhost_crypto/main.c
@@ -14,6 +14,7 @@
#include <rte_vhost.h>
#include <rte_cryptodev.h>
#include <rte_vhost_crypto.h>
+#include <rte_string_fns.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
@@ -29,97 +30,161 @@
#define SESSION_MAP_ENTRIES (1024)
#define REFRESH_TIME_SEC (3)
-#define MAX_NB_SOCKETS (32)
-#define DEF_SOCKET_FILE "/tmp/vhost_crypto1.socket"
+#define MAX_NB_SOCKETS (4)
+#define MAX_NB_WORKER_CORES (16)
-struct vhost_crypto_options {
+struct lcore_option {
+ uint32_t lcore_id;
char *socket_files[MAX_NB_SOCKETS];
uint32_t nb_sockets;
uint8_t cid;
uint16_t qid;
- uint32_t zero_copy;
- uint32_t guest_polling;
-} options;
+};
struct vhost_crypto_info {
int vids[MAX_NB_SOCKETS];
+ uint32_t nb_vids;
struct rte_mempool *sess_pool;
struct rte_mempool *cop_pool;
- uint32_t lcore_id;
uint8_t cid;
uint32_t qid;
- uint32_t nb_vids;
+ uint32_t nb_inflight_ops;
volatile uint32_t initialized[MAX_NB_SOCKETS];
+} __rte_cache_aligned;
-} info;
+struct vhost_crypto_options {
+ struct lcore_option los[MAX_NB_WORKER_CORES];
+ struct vhost_crypto_info *infos[MAX_NB_WORKER_CORES];
+ uint32_t nb_los;
+ uint32_t zero_copy;
+ uint32_t guest_polling;
+} options;
+#define CONFIG_KEYWORD "config"
#define SOCKET_FILE_KEYWORD "socket-file"
-#define CRYPTODEV_ID_KEYWORD "cdev-id"
-#define CRYPTODEV_QUEUE_KEYWORD "cdev-queue-id"
#define ZERO_COPY_KEYWORD "zero-copy"
#define POLLING_KEYWORD "guest-polling"
-uint64_t vhost_cycles[2], last_v_cycles[2];
-uint64_t outpkt_amount;
+#define NB_SOCKET_FIELDS (2)
+
+static uint32_t
+find_lo(uint32_t lcore_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < options.nb_los; i++)
+ if (options.los[i].lcore_id == lcore_id)
+ return i;
+
+ return UINT32_MAX;
+}
/** support *SOCKET_FILE_PATH:CRYPTODEV_ID* format */
static int
parse_socket_arg(char *arg)
{
- uint32_t nb_sockets = options.nb_sockets;
- size_t len = strlen(arg);
+ uint32_t nb_sockets;
+ uint32_t lcore_id;
+ char *str_fld[NB_SOCKET_FIELDS];
+ struct lcore_option *lo;
+ uint32_t idx;
+ char *end;
+
+ if (rte_strsplit(arg, strlen(arg), str_fld, NB_SOCKET_FIELDS, ',') !=
+ NB_SOCKET_FIELDS) {
+ RTE_LOG(ERR, USER1, "Invalid socket parameter '%s'\n", arg);
+ return -EINVAL;
+ }
+
+ errno = 0;
+ lcore_id = strtoul(str_fld[0], &end, 0);
+ if (errno != 0 || end == str_fld[0] || lcore_id > 255)
+ return -EINVAL;
+
+ idx = find_lo(lcore_id);
+ if (idx == UINT32_MAX) {
+ if (options.nb_los == MAX_NB_WORKER_CORES)
+ return -ENOMEM;
+ lo = &options.los[options.nb_los];
+ lo->lcore_id = lcore_id;
+ options.nb_los++;
+ } else
+ lo = &options.los[idx];
+
+ nb_sockets = lo->nb_sockets;
if (nb_sockets >= MAX_NB_SOCKETS) {
RTE_LOG(ERR, USER1, "Too many socket files!\n");
return -ENOMEM;
}
- options.socket_files[nb_sockets] = rte_malloc(NULL, len, 0);
- if (!options.socket_files[nb_sockets]) {
+ lo->socket_files[nb_sockets] = strdup(str_fld[1]);
+ if (!lo->socket_files[nb_sockets]) {
RTE_LOG(ERR, USER1, "Insufficient memory\n");
return -ENOMEM;
}
- rte_memcpy(options.socket_files[nb_sockets], arg, len);
-
- options.nb_sockets++;
+ lo->nb_sockets++;
return 0;
}
static int
-parse_cryptodev_id(const char *q_arg)
+parse_config(char *q_arg)
{
- char *end = NULL;
- uint64_t pm;
-
- /* parse decimal string */
- pm = strtoul(q_arg, &end, 10);
- if (pm > rte_cryptodev_count()) {
- RTE_LOG(ERR, USER1, "Invalid Cryptodev ID %s\n", q_arg);
- return -1;
- }
+ struct lcore_option *lo;
+ char s[256];
+ const char *p, *p0 = q_arg;
+ char *end;
+ enum fieldnames {
+ FLD_LCORE = 0,
+ FLD_CID,
+ FLD_QID,
+ _NUM_FLD
+ };
+ uint32_t flds[_NUM_FLD];
+ char *str_fld[_NUM_FLD];
+ uint32_t i;
+ uint32_t size;
- options.cid = (uint8_t)pm;
+ while ((p = strchr(p0, '(')) != NULL) {
+ ++p;
+ p0 = strchr(p, ')');
+ if (p0 == NULL)
+ return -1;
- return 0;
-}
+ size = p0 - p;
+ if (size >= sizeof(s))
+ return -1;
-static int
-parse_cdev_queue_id(const char *q_arg)
-{
- char *end = NULL;
- uint64_t pm;
+ snprintf(s, sizeof(s), "%.*s", size, p);
+ if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
+ _NUM_FLD)
+ return -1;
+ for (i = 0; i < _NUM_FLD; i++) {
+ errno = 0;
+ flds[i] = strtoul(str_fld[i], &end, 0);
+ if (errno != 0 || end == str_fld[i] || flds[i] > 255)
+ return -EINVAL;
+ }
- /* parse decimal string */
- pm = strtoul(q_arg, &end, 10);
- if (pm == UINT64_MAX) {
- RTE_LOG(ERR, USER1, "Invalid Cryptodev Queue ID %s\n", q_arg);
- return -1;
+ if (flds[FLD_LCORE] > RTE_MAX_LCORE)
+ return -EINVAL;
+
+ i = find_lo(flds[FLD_LCORE]);
+ if (i == UINT32_MAX) {
+ if (options.nb_los == MAX_NB_WORKER_CORES)
+ return -ENOMEM;
+ lo = &options.los[options.nb_los];
+ options.nb_los++;
+ } else
+ lo = &options.los[i];
+
+ lo->lcore_id = flds[FLD_LCORE];
+ lo->cid = flds[FLD_CID];
+ lo->qid = flds[FLD_QID];
}
- options.qid = (uint16_t)pm;
-
return 0;
}
@@ -127,13 +192,12 @@ static void
vhost_crypto_usage(const char *prgname)
{
printf("%s [EAL options] --\n"
- " --%s SOCKET-FILE-PATH\n"
- " --%s CRYPTODEV_ID: crypto device id\n"
- " --%s CDEV_QUEUE_ID: crypto device queue id\n"
+ " --%s <lcore>,SOCKET-FILE-PATH\n"
+ " --%s (lcore,cdev_id,queue_id)[,(lcore,cdev_id,queue_id)]"
" --%s: zero copy\n"
" --%s: guest polling\n",
- prgname, SOCKET_FILE_KEYWORD, CRYPTODEV_ID_KEYWORD,
- CRYPTODEV_QUEUE_KEYWORD, ZERO_COPY_KEYWORD, POLLING_KEYWORD);
+ prgname, SOCKET_FILE_KEYWORD, CONFIG_KEYWORD,
+ ZERO_COPY_KEYWORD, POLLING_KEYWORD);
}
static int
@@ -145,19 +209,12 @@ vhost_crypto_parse_args(int argc, char **argv)
int option_index;
struct option lgopts[] = {
{SOCKET_FILE_KEYWORD, required_argument, 0, 0},
- {CRYPTODEV_ID_KEYWORD, required_argument, 0, 0},
- {CRYPTODEV_QUEUE_KEYWORD, required_argument, 0, 0},
+ {CONFIG_KEYWORD, required_argument, 0, 0},
{ZERO_COPY_KEYWORD, no_argument, 0, 0},
{POLLING_KEYWORD, no_argument, 0, 0},
{NULL, 0, 0, 0}
};
- options.cid = 0;
- options.qid = 0;
- options.nb_sockets = 0;
- options.guest_polling = 0;
- options.zero_copy = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
-
argvopt = argv;
while ((opt = getopt_long(argc, argvopt, "s:",
@@ -173,15 +230,8 @@ vhost_crypto_parse_args(int argc, char **argv)
return ret;
}
} else if (strcmp(lgopts[option_index].name,
- CRYPTODEV_ID_KEYWORD) == 0) {
- ret = parse_cryptodev_id(optarg);
- if (ret < 0) {
- vhost_crypto_usage(prgname);
- return ret;
- }
- } else if (strcmp(lgopts[option_index].name,
- CRYPTODEV_QUEUE_KEYWORD) == 0) {
- ret = parse_cdev_queue_id(optarg);
+ CONFIG_KEYWORD) == 0) {
+ ret = parse_config(optarg);
if (ret < 0) {
vhost_crypto_usage(prgname);
return ret;
@@ -203,22 +253,15 @@ vhost_crypto_parse_args(int argc, char **argv)
}
}
- if (options.nb_sockets == 0) {
- options.socket_files[0] = strdup(DEF_SOCKET_FILE);
- options.nb_sockets = 1;
- RTE_LOG(INFO, USER1,
- "VHOST-CRYPTO: use default socket file %s\n",
- DEF_SOCKET_FILE);
- }
-
return 0;
}
static int
new_device(int vid)
{
+ struct vhost_crypto_info *info = NULL;
char path[PATH_MAX];
- uint32_t idx, i;
+ uint32_t i, j;
int ret;
ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
@@ -227,23 +270,25 @@ new_device(int vid)
return ret;
}
- for (idx = 0; idx < options.nb_sockets; idx++) {
- if (strcmp(path, options.socket_files[idx]) == 0)
+ for (i = 0; i < options.nb_los; i++) {
+ for (j = 0; j < options.los[i].nb_sockets; j++) {
+ if (strcmp(path, options.los[i].socket_files[j]) == 0) {
+ info = options.infos[i];
+ break;
+ }
+ }
+
+ if (info)
break;
}
- if (idx == options.nb_sockets) {
+ if (!info) {
RTE_LOG(ERR, USER1, "Cannot find recorded socket\n");
return -ENOENT;
}
- for (i = 0; i < 2; i++) {
- vhost_cycles[i] = 0;
- last_v_cycles[i] = 0;
- }
-
- ret = rte_vhost_crypto_create(vid, info.cid, info.sess_pool,
- rte_lcore_to_socket_id(info.lcore_id));
+ ret = rte_vhost_crypto_create(vid, info->cid, info->sess_pool,
+ rte_lcore_to_socket_id(options.los[i].lcore_id));
if (ret) {
RTE_LOG(ERR, USER1, "Cannot create vhost crypto\n");
return ret;
@@ -256,8 +301,8 @@ new_device(int vid)
return ret;
}
- info.vids[idx] = vid;
- info.initialized[idx] = 1;
+ info->vids[j] = vid;
+ info->initialized[j] = 1;
rte_wmb();
@@ -269,19 +314,30 @@ new_device(int vid)
static void
destroy_device(int vid)
{
- uint32_t i;
-
- for (i = 0; i < info.nb_vids; i++) {
- if (vid == info.vids[i])
+ struct vhost_crypto_info *info = NULL;
+ uint32_t i, j;
+
+ for (i = 0; i < options.nb_los; i++) {
+ for (j = 0; j < options.los[i].nb_sockets; j++) {
+ if (options.infos[i]->vids[j] == vid) {
+ info = options.infos[i];
+ break;
+ }
+ }
+ if (info)
break;
}
- if (i == info.nb_vids) {
+ if (!info) {
RTE_LOG(ERR, USER1, "Cannot find socket file from list\n");
return;
}
- info.initialized[i] = 0;
+ do {
+
+ } while (info->nb_inflight_ops);
+
+ info->initialized[j] = 0;
rte_wmb();
@@ -302,25 +358,24 @@ static void clrscr(void)
}
static int
-vhost_crypto_worker(__rte_unused void *arg)
+vhost_crypto_worker(void *arg)
{
struct rte_crypto_op *ops[NB_VIRTIO_QUEUES][MAX_PKT_BURST + 1];
struct rte_crypto_op *ops_deq[NB_VIRTIO_QUEUES][MAX_PKT_BURST + 1];
- uint32_t nb_inflight_ops = 0;
+ struct vhost_crypto_info *info = arg;
uint16_t nb_callfds;
int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
uint32_t lcore_id = rte_lcore_id();
uint32_t burst_size = MAX_PKT_BURST;
uint32_t i, j, k;
uint32_t to_fetch, fetched;
- uint64_t t_start, t_end, interval;
int ret = 0;
RTE_LOG(INFO, USER1, "Processing on Core %u started\n", lcore_id);
for (i = 0; i < NB_VIRTIO_QUEUES; i++) {
- if (rte_crypto_op_bulk_alloc(info.cop_pool,
+ if (rte_crypto_op_bulk_alloc(info->cop_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops[i],
burst_size) < burst_size) {
RTE_LOG(ERR, USER1, "Failed to alloc cops\n");
@@ -330,45 +385,38 @@ vhost_crypto_worker(__rte_unused void *arg)
}
while (1) {
- for (i = 0; i < info.nb_vids; i++) {
- if (unlikely(info.initialized[i] == 0))
+ for (i = 0; i < info->nb_vids; i++) {
+ if (unlikely(info->initialized[i] == 0))
continue;
for (j = 0; j < NB_VIRTIO_QUEUES; j++) {
- t_start = rte_rdtsc_precise();
-
to_fetch = RTE_MIN(burst_size,
(NB_CRYPTO_DESCRIPTORS -
- nb_inflight_ops));
+ info->nb_inflight_ops));
fetched = rte_vhost_crypto_fetch_requests(
- info.vids[i], j, ops[j],
+ info->vids[i], j, ops[j],
to_fetch);
- nb_inflight_ops += rte_cryptodev_enqueue_burst(
- info.cid, info.qid, ops[j],
+ info->nb_inflight_ops +=
+ rte_cryptodev_enqueue_burst(
+ info->cid, info->qid, ops[j],
fetched);
if (unlikely(rte_crypto_op_bulk_alloc(
- info.cop_pool,
+ info->cop_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops[j], fetched) < fetched)) {
RTE_LOG(ERR, USER1, "Failed realloc\n");
return -1;
}
- t_end = rte_rdtsc_precise();
- interval = t_end - t_start;
-
- vhost_cycles[fetched > 0] += interval;
- t_start = t_end;
fetched = rte_cryptodev_dequeue_burst(
- info.cid, info.qid,
+ info->cid, info->qid,
ops_deq[j], RTE_MIN(burst_size,
- nb_inflight_ops));
+ info->nb_inflight_ops));
fetched = rte_vhost_crypto_finalize_requests(
ops_deq[j], fetched, callfds,
&nb_callfds);
- nb_inflight_ops -= fetched;
- outpkt_amount += fetched;
+ info->nb_inflight_ops -= fetched;
if (!options.guest_polling) {
for (k = 0; k < nb_callfds; k++)
@@ -376,11 +424,8 @@ vhost_crypto_worker(__rte_unused void *arg)
(eventfd_t)1);
}
- rte_mempool_put_bulk(info.cop_pool,
+ rte_mempool_put_bulk(info->cop_pool,
(void **)ops_deq[j], fetched);
- interval = rte_rdtsc_precise() - t_start;
-
- vhost_cycles[fetched > 0] += interval;
}
}
}
@@ -388,17 +433,27 @@ exit:
return ret;
}
-
static void
-unregister_drivers(int socket_num)
+free_resource(void)
{
- int ret;
+ uint32_t i, j;
+
+ for (i = 0; i < options.nb_los; i++) {
+ struct lcore_option *lo = &options.los[i];
+ struct vhost_crypto_info *info = options.infos[i];
+
+ rte_mempool_free(info->cop_pool);
+ rte_mempool_free(info->sess_pool);
+
+ for (j = 0; j < lo->nb_sockets; j++) {
+ rte_vhost_driver_unregister(lo->socket_files[j]);
+ free(lo->socket_files[j]);
+ }
+
+ rte_free(info);
+ }
- ret = rte_vhost_driver_unregister(options.socket_files[socket_num]);
- if (ret != 0)
- RTE_LOG(ERR, USER1,
- "Fail to unregister vhost driver for %s.\n",
- options.socket_files[socket_num]);
+ memset(&options, 0, sizeof(options));
}
int
@@ -407,10 +462,8 @@ main(int argc, char *argv[])
struct rte_cryptodev_qp_conf qp_conf = {NB_CRYPTO_DESCRIPTORS};
struct rte_cryptodev_config config;
struct rte_cryptodev_info dev_info;
- uint32_t cryptodev_id;
- uint32_t worker_lcore;
char name[128];
- uint32_t i = 0;
+ uint32_t i, j, lcore;
int ret;
ret = rte_eal_init(argc, argv);
@@ -423,114 +476,121 @@ main(int argc, char *argv[])
if (ret < 0)
rte_exit(EXIT_FAILURE, "Failed to parse arguments!\n");
- info.cid = options.cid;
- info.qid = options.qid;
+ for (i = 0; i < options.nb_los; i++) {
+ struct lcore_option *lo = &options.los[i];
+ struct vhost_crypto_info *info;
- worker_lcore = rte_get_next_lcore(0, 1, 0);
- if (worker_lcore == RTE_MAX_LCORE)
- rte_exit(EXIT_FAILURE, "Not enough lcore\n");
-
- cryptodev_id = info.cid;
- rte_cryptodev_info_get(cryptodev_id, &dev_info);
- if (dev_info.max_nb_queue_pairs < info.qid + 1) {
- RTE_LOG(ERR, USER1, "Number of queues cannot over %u",
- dev_info.max_nb_queue_pairs);
- goto error_exit;
- }
+ info = rte_zmalloc_socket(NULL, sizeof(*info),
+ RTE_CACHE_LINE_SIZE, rte_lcore_to_socket_id(
+ lo->lcore_id));
+ if (!info) {
+ ret = -ENOMEM;
+ goto error_exit;
+ }
- config.nb_queue_pairs = dev_info.max_nb_queue_pairs;
- config.socket_id = rte_lcore_to_socket_id(worker_lcore);
+ info->cid = lo->cid;
+ info->qid = lo->qid;
+ info->nb_vids = lo->nb_sockets;
- ret = rte_cryptodev_configure(cryptodev_id, &config);
- if (ret < 0) {
- RTE_LOG(ERR, USER1, "Failed to configure cryptodev %u",
- cryptodev_id);
- goto error_exit;
- }
+ rte_cryptodev_info_get(info->cid, &dev_info);
+ if (dev_info.max_nb_queue_pairs < info->qid + 1) {
+ RTE_LOG(ERR, USER1, "Number of queues cannot over %u",
+ dev_info.max_nb_queue_pairs);
+ goto error_exit;
+ }
- snprintf(name, 127, "SESS_POOL_%u", worker_lcore);
- info.sess_pool = rte_mempool_create(name, SESSION_MAP_ENTRIES,
- rte_cryptodev_sym_get_private_session_size(
- cryptodev_id), 64, 0, NULL, NULL, NULL, NULL,
- rte_lcore_to_socket_id(worker_lcore), 0);
- if (!info.sess_pool) {
- RTE_LOG(ERR, USER1, "Failed to create mempool");
- goto error_exit;
- }
+ config.nb_queue_pairs = dev_info.max_nb_queue_pairs;
+ config.socket_id = rte_lcore_to_socket_id(lo->lcore_id);
- snprintf(name, 127, "COPPOOL_%u", worker_lcore);
- info.cop_pool = rte_crypto_op_pool_create(name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MEMPOOL_OBJS,
- NB_CACHE_OBJS, 0, rte_lcore_to_socket_id(worker_lcore));
+ ret = rte_cryptodev_configure(info->cid, &config);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "Failed to configure cryptodev %u",
+ info->cid);
+ goto error_exit;
+ }
- if (!info.cop_pool) {
- RTE_LOG(ERR, USER1, "Lcore %u failed to create crypto pool",
- worker_lcore);
- ret = -1;
- goto error_exit;
- }
+ snprintf(name, 127, "SESS_POOL_%u", lo->lcore_id);
+ info->sess_pool = rte_mempool_create(name, SESSION_MAP_ENTRIES,
+ rte_cryptodev_sym_get_private_session_size(
+ info->cid), 64, 0, NULL, NULL, NULL, NULL,
+ rte_lcore_to_socket_id(lo->lcore_id), 0);
+ if (!info->sess_pool) {
+ RTE_LOG(ERR, USER1, "Failed to create mempool");
+ goto error_exit;
+ }
- info.nb_vids = options.nb_sockets;
- for (i = 0; i < MAX_NB_SOCKETS; i++)
- info.vids[i] = -1;
+ snprintf(name, 127, "COPPOOL_%u", lo->lcore_id);
+ info->cop_pool = rte_crypto_op_pool_create(name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MEMPOOL_OBJS,
+ NB_CACHE_OBJS, 0,
+ rte_lcore_to_socket_id(lo->lcore_id));
- for (i = 0; i < dev_info.max_nb_queue_pairs; i++) {
- ret = rte_cryptodev_queue_pair_setup(cryptodev_id, i,
- &qp_conf, rte_lcore_to_socket_id(worker_lcore),
- info.sess_pool);
- if (ret < 0) {
- RTE_LOG(ERR, USER1, "Failed to configure qp %u\n",
- info.cid);
+ if (!info->cop_pool) {
+ RTE_LOG(ERR, USER1, "Failed to create crypto pool");
+ ret = -ENOMEM;
goto error_exit;
}
- }
- ret = rte_cryptodev_start(cryptodev_id);
- if (ret < 0) {
- RTE_LOG(ERR, USER1, "Failed to start cryptodev %u\n", info.cid);
- goto error_exit;
+ options.infos[i] = info;
+
+ for (j = 0; j < dev_info.max_nb_queue_pairs; j++) {
+ ret = rte_cryptodev_queue_pair_setup(info->cid, j,
+ &qp_conf, rte_lcore_to_socket_id(
+ lo->lcore_id),
+ info->sess_pool);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "Failed to configure qp\n");
+ goto error_exit;
+ }
+ }
}
- info.cid = cryptodev_id;
- info.lcore_id = worker_lcore;
+ for (i = 0; i < options.nb_los; i++) {
+ struct lcore_option *lo = &options.los[i];
+ struct vhost_crypto_info *info = options.infos[i];
- if (rte_eal_remote_launch(vhost_crypto_worker, NULL, worker_lcore)
- < 0) {
- RTE_LOG(ERR, USER1, "Failed to start worker lcore");
- goto error_exit;
- }
+ ret = rte_cryptodev_start(lo->cid);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "Failed to start cryptodev\n");
+ goto error_exit;
+ }
- for (i = 0; i < options.nb_sockets; i++) {
- if (rte_vhost_driver_register(options.socket_files[i],
- RTE_VHOST_USER_DEQUEUE_ZERO_COPY) < 0) {
- RTE_LOG(ERR, USER1, "socket %s already exists\n",
- options.socket_files[i]);
+ if (rte_eal_remote_launch(vhost_crypto_worker, info,
+ lo->lcore_id) < 0) {
+ RTE_LOG(ERR, USER1, "Failed to start worker lcore");
goto error_exit;
}
- rte_vhost_driver_callback_register(options.socket_files[i],
+ for (j = 0; j < lo->nb_sockets; j++) {
+ ret = rte_vhost_driver_register(lo->socket_files[j],
+ RTE_VHOST_USER_DEQUEUE_ZERO_COPY);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "socket %s already exists\n",
+ lo->socket_files[j]);
+ goto error_exit;
+ }
+
+ rte_vhost_driver_callback_register(lo->socket_files[j],
&virtio_crypto_device_ops);
- if (rte_vhost_driver_start(options.socket_files[i]) < 0) {
- RTE_LOG(ERR, USER1, "failed to start vhost driver.\n");
- goto error_exit;
+ ret = rte_vhost_driver_start(lo->socket_files[j]);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "failed to start vhost.\n");
+ goto error_exit;
+ }
}
}
- RTE_LCORE_FOREACH(worker_lcore)
- rte_eal_wait_lcore(worker_lcore);
+ RTE_LCORE_FOREACH(lcore)
+ rte_eal_wait_lcore(lcore);
- rte_mempool_free(info.sess_pool);
- rte_mempool_free(info.cop_pool);
+ free_resource();
return 0;
error_exit:
- for (i = 0; i < options.nb_sockets; i++)
- unregister_drivers(i);
- rte_mempool_free(info.cop_pool);
- rte_mempool_free(info.sess_pool);
+ free_resource();
return -1;
}
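
To illustrate the reworked multi-core configuration (binary path, lcore numbers and socket path are assumptions, and cdev 0 must refer to a cryptodev that is actually present), one worker lcore bound to cryptodev 0, queue 0 and serving a single vhost-user socket could be requested as:

    ./build/vhost_crypto -l 0,1 -- \
            --config "(1,0,0)" \
            --socket-file 1,/tmp/vhost-crypto-1.sock

Each --config tuple is (lcore,cdev_id,queue_id) and each --socket-file entry is <lcore>,<path>; repeating them with different lcores is what replaces the old single cdev-id/cdev-queue-id options.
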
diff --git a/examples/vhost_crypto/meson.build b/examples/vhost_crypto/meson.build
index 0f4876f0..daf19fb8 100644
--- a/examples/vhost_crypto/meson.build
+++ b/examples/vhost_crypto/meson.build
@@ -8,7 +8,7 @@
allow_experimental_apis = true
deps += ['vhost', 'cryptodev']
-cflags += ['-D_GNU_SOURCE','-D_FILE_OFFSET_BITS=64']
+cflags += ['-D_FILE_OFFSET_BITS=64']
sources = files(
'main.c'
)
diff --git a/examples/vhost_scsi/Makefile b/examples/vhost_scsi/Makefile
index fa0cf727..523aee0b 100644
--- a/examples/vhost_scsi/Makefile
+++ b/examples/vhost_scsi/Makefile
@@ -18,7 +18,7 @@ shared: build/$(APP)-shared
static: build/$(APP)-static
ln -sf $(APP)-static build/$(APP)
-CFLAGS += -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64
+CFLAGS += -D_FILE_OFFSET_BITS=64
LDFLAGS += -pthread
PC_FILE := $(shell pkg-config --path libdpdk)
@@ -57,7 +57,7 @@ please change the definition of the RTE_TARGET environment variable)
all:
else
-CFLAGS += -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64
+CFLAGS += -D_FILE_OFFSET_BITS=64
CFLAGS += -O2
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/vhost_scsi/meson.build b/examples/vhost_scsi/meson.build
index 5f92370f..2303bcae 100644
--- a/examples/vhost_scsi/meson.build
+++ b/examples/vhost_scsi/meson.build
@@ -10,7 +10,7 @@ if host_machine.system() != 'linux'
build = false
endif
deps += 'vhost'
-cflags += ['-D_GNU_SOURCE','-D_FILE_OFFSET_BITS=64']
+cflags += ['-D_FILE_OFFSET_BITS=64']
sources = files(
'scsi.c', 'vhost_scsi.c'
)
diff --git a/examples/vm_power_manager/Makefile b/examples/vm_power_manager/Makefile
index 13a5205b..50147c05 100644
--- a/examples/vm_power_manager/Makefile
+++ b/examples/vm_power_manager/Makefile
@@ -31,6 +31,12 @@ CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lvirt
+JANSSON := $(shell pkg-config --exists jansson; echo $$?)
+ifeq ($(JANSSON), 0)
+LDLIBS += $(shell pkg-config --libs jansson)
+CFLAGS += -DUSE_JANSSON
+endif
+
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
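
The USE_JANSSON define added here is only set when pkg-config can locate jansson, so any JSON handling in the C sources must sit behind a preprocessor guard (channel_monitor.c below does exactly that). A minimal sketch of such a guard follows; the dump_command() helper is hypothetical and only illustrates the conditional-compilation pattern.

/*
 * Hypothetical illustration of guarding JSON handling behind the
 * USE_JANSSON flag that the build system defines only when the jansson
 * development files are found. dump_command() is not part of the example.
 */
#include <stdio.h>

#ifdef USE_JANSSON
#include <jansson.h>

static void
dump_command(const char *text)
{
	json_error_t error;
	json_t *root = json_loads(text, 0, &error);

	if (root == NULL) {
		printf("JSON error on line %d: %s\n", error.line, error.text);
		return;
	}
	printf("parsed %zu top-level keys\n", json_object_size(root));
	json_decref(root);
}
#else
static void
dump_command(const char *text)
{
	/* Built without jansson: JSON commands are silently ignored. */
	(void)text;
}
#endif
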
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 927fc35a..4fac099d 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -13,6 +13,7 @@
#include <sys/queue.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/select.h>
@@ -43,7 +44,8 @@ static unsigned char *global_cpumaps;
static virVcpuInfo *global_vircpuinfo;
static size_t global_maplen;
-static unsigned global_n_host_cpus;
+static unsigned int global_n_host_cpus;
+static bool global_hypervisor_available;
/*
* Represents a single Virtual Machine
@@ -198,7 +200,11 @@ get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
{
struct virtual_machine_info *vm_info =
(struct virtual_machine_info *)chan_info->priv_info;
- return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+
+ if (global_hypervisor_available && (vm_info != NULL))
+ return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+ else
+ return 0;
}
static inline int
@@ -280,6 +286,38 @@ open_non_blocking_channel(struct channel_info *info)
}
static int
+open_host_channel(struct channel_info *info)
+{
+ int flags;
+
+ info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
+ if (info->fd == -1) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
+ strerror(errno),
+ info->channel_path);
+ return -1;
+ }
+
+ /* Get current flags */
+ flags = fcntl(info->fd, F_GETFL, 0);
+ if (flags < 0) {
+ RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for"
+ "'%s'\n", strerror(errno), info->channel_path);
+ return 1;
+ }
+ /* Set to Non Blocking */
+ flags |= O_NONBLOCK;
+ if (fcntl(info->fd, F_SETFL, flags) < 0) {
+ RTE_LOG(WARNING, CHANNEL_MANAGER,
+ "Error(%s) setting non-blocking "
+ "socket for '%s'\n",
+ strerror(errno), info->channel_path);
+ return -1;
+ }
+ return 0;
+}
+
+static int
setup_channel_info(struct virtual_machine_info **vm_info_dptr,
struct channel_info **chan_info_dptr, unsigned channel_num)
{
@@ -289,6 +327,7 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
chan_info->channel_num = channel_num;
chan_info->priv_info = (void *)vm_info;
chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+ chan_info->type = CHANNEL_TYPE_BINARY;
if (open_non_blocking_channel(chan_info) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
"'%s' for VM '%s'\n",
@@ -311,6 +350,42 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
return 0;
}
+static void
+fifo_path(char *dst, unsigned int len)
+{
+ snprintf(dst, len, "%sfifo", CHANNEL_MGR_SOCKET_PATH);
+}
+
+static int
+setup_host_channel_info(struct channel_info **chan_info_dptr,
+ unsigned int channel_num)
+{
+ struct channel_info *chan_info = *chan_info_dptr;
+
+ chan_info->channel_num = channel_num;
+ chan_info->priv_info = (void *)NULL;
+ chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+ chan_info->type = CHANNEL_TYPE_JSON;
+
+ fifo_path(chan_info->channel_path, sizeof(chan_info->channel_path));
+
+ if (open_host_channel(chan_info) < 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
+ "'%s'\n",
+ chan_info->channel_path);
+ return -1;
+ }
+ if (add_channel_to_monitor(&chan_info) < 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Could add channel: "
+ "'%s' to epoll ctl\n",
+ chan_info->channel_path);
+ return -1;
+
+ }
+ chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
+ return 0;
+}
+
int
add_all_channels(const char *vm_name)
{
@@ -466,6 +541,45 @@ add_channels(const char *vm_name, unsigned *channel_list,
}
int
+add_host_channel(void)
+{
+ struct channel_info *chan_info;
+ char socket_path[PATH_MAX];
+ int num_channels_enabled = 0;
+ int ret;
+
+ fifo_path(socket_path, sizeof(socket_path));
+
+ ret = mkfifo(socket_path, 0660);
+ if ((errno != EEXIST) && (ret < 0)) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
+ "%s\n", socket_path, strerror(errno));
+ return 0;
+ }
+
+ if (access(socket_path, F_OK) < 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
+ "%s\n", socket_path, strerror(errno));
+ return 0;
+ }
+ chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
+ if (chan_info == NULL) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
+ "channel '%s'\n", socket_path);
+ return 0;
+ }
+ snprintf(chan_info->channel_path,
+ sizeof(chan_info->channel_path), "%s", socket_path);
+ if (setup_host_channel_info(&chan_info, 0) < 0) {
+ rte_free(chan_info);
+ return 0;
+ }
+ num_channels_enabled++;
+
+ return num_channels_enabled;
+}
+
+int
remove_channel(struct channel_info **chan_info_dptr)
{
struct virtual_machine_info *vm_info;
@@ -559,6 +673,8 @@ get_all_vm(int *num_vm, int *num_vcpu)
VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
+ if (!global_hypervisor_available)
+ return;
memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
@@ -768,38 +884,42 @@ connect_hypervisor(const char *path)
}
return 0;
}
-
int
-channel_manager_init(const char *path)
+channel_manager_init(const char *path __rte_unused)
{
virNodeInfo info;
LIST_INIT(&vm_list_head);
if (connect_hypervisor(path) < 0) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
- return -1;
- }
-
- global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+ global_n_host_cpus = 64;
+ global_hypervisor_available = 0;
+ RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
+ } else {
+ global_hypervisor_available = 1;
+
+ global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+
+ global_vircpuinfo = rte_zmalloc(NULL,
+ sizeof(*global_vircpuinfo) *
+ CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
+ if (global_vircpuinfo == NULL) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
+ goto error;
+ }
+ global_cpumaps = rte_zmalloc(NULL,
+ CHANNEL_CMDS_MAX_CPUS * global_maplen,
+ RTE_CACHE_LINE_SIZE);
+ if (global_cpumaps == NULL)
+ goto error;
- global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
- CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
- if (global_vircpuinfo == NULL) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
- goto error;
- }
- global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
- RTE_CACHE_LINE_SIZE);
- if (global_cpumaps == NULL) {
- goto error;
+ if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+ goto error;
+ }
+ global_n_host_cpus = (unsigned int)info.cpus;
}
- if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
- goto error;
- }
- global_n_host_cpus = (unsigned)info.cpus;
if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
@@ -811,7 +931,8 @@ channel_manager_init(const char *path)
return 0;
error:
- disconnect_hypervisor();
+ if (global_hypervisor_available)
+ disconnect_hypervisor();
return -1;
}
@@ -838,7 +959,10 @@ channel_manager_exit(void)
rte_free(vm_info);
}
- rte_free(global_cpumaps);
- rte_free(global_vircpuinfo);
- disconnect_hypervisor();
+ if (global_hypervisor_available) {
+ /* Only needed if hypervisor available */
+ rte_free(global_cpumaps);
+ rte_free(global_vircpuinfo);
+ disconnect_hypervisor();
+ }
}
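
add_host_channel() and open_host_channel() above boil down to: create the fifo if it is missing, open it read/write, then flip the descriptor to non-blocking with fcntl(). A condensed sketch of that sequence, with error handling trimmed and the path supplied by the caller, is shown below.

/*
 * Condensed sketch of the host channel setup above: create the fifo if it
 * does not already exist, open it read/write, then switch the descriptor
 * to non-blocking mode. Error handling is trimmed and the path is supplied
 * by the caller.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static int
open_fifo_nonblock(const char *path)
{
	int fd, flags;

	if (mkfifo(path, 0660) < 0 && errno != EEXIST) {
		fprintf(stderr, "cannot create fifo '%s': %s\n",
				path, strerror(errno));
		return -1;
	}

	fd = open(path, O_RDWR | O_RSYNC);
	if (fd < 0)
		return -1;

	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
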
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index 872ec614..d948b304 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -37,7 +37,7 @@ struct sockaddr_un _sockaddr_un;
#define UNIX_PATH_MAX sizeof(_sockaddr_un.sun_path)
#endif
-#define MAX_VMS 4
+#define MAX_CLIENTS 64
#define MAX_VCPUS 20
@@ -47,13 +47,20 @@ struct libvirt_vm_info {
uint8_t num_cpus;
};
-struct libvirt_vm_info lvm_info[MAX_VMS];
+struct libvirt_vm_info lvm_info[MAX_CLIENTS];
/* Communication Channel Status */
enum channel_status { CHANNEL_MGR_CHANNEL_DISCONNECTED = 0,
CHANNEL_MGR_CHANNEL_CONNECTED,
CHANNEL_MGR_CHANNEL_DISABLED,
CHANNEL_MGR_CHANNEL_PROCESSING};
+/* Communication Channel Type */
+enum channel_type {
+ CHANNEL_TYPE_BINARY = 0,
+ CHANNEL_TYPE_INI,
+ CHANNEL_TYPE_JSON
+};
+
/* VM libvirt(qemu/KVM) connection status */
enum vm_status { CHANNEL_MGR_VM_INACTIVE = 0, CHANNEL_MGR_VM_ACTIVE};
@@ -66,6 +73,7 @@ struct channel_info {
volatile uint32_t status; /**< Connection status(enum channel_status) */
int fd; /**< AF_UNIX socket fd */
unsigned channel_num; /**< CHANNEL_MGR_SOCKET_PATH/<vm_name>.channel_num */
+ enum channel_type type; /**< Binary, ini, json, etc. */
void *priv_info; /**< Pointer to private info, do not modify */
};
@@ -227,6 +235,15 @@ int add_channels(const char *vm_name, unsigned *channel_list,
unsigned num_channels);
/**
+ * Set up a fifo through which host applications can send commands and
+ * policies to the vm_power_manager.
+ *
+ * @return
+ * - 1 on success, 0 on failure
+ */
+int add_host_channel(void);
+
+/**
* Remove a channel definition from the channel manager. This must only be
* called from the channel monitor thread.
*
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 7fa47ba9..5da53154 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -9,11 +9,18 @@
#include <signal.h>
#include <errno.h>
#include <string.h>
+#include <fcntl.h>
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <sys/time.h>
-
+#include <sys/socket.h>
+#include <sys/select.h>
+#ifdef USE_JANSSON
+#include <jansson.h>
+#else
+#pragma message "Jansson dev libs unavailable, not including JSON parsing"
+#endif
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
@@ -35,13 +42,243 @@
uint64_t vsi_pkt_count_prev[384];
uint64_t rdtsc_prev[384];
+#define MAX_JSON_STRING_LEN 1024
+char json_data[MAX_JSON_STRING_LEN];
double time_period_ms = 1;
static volatile unsigned run_loop = 1;
static int global_event_fd;
static unsigned int policy_is_set;
static struct epoll_event *global_events_list;
-static struct policy policies[MAX_VMS];
+static struct policy policies[MAX_CLIENTS];
+
+#ifdef USE_JANSSON
+
+union PFID {
+ struct ether_addr addr;
+ uint64_t pfid;
+};
+
+static int
+str_to_ether_addr(const char *a, struct ether_addr *ether_addr)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return -1;
+ a = end + 1;
+ } while (++i != RTE_DIM(o) / sizeof(o[0]) && end[0] != 0);
+
+ /* Junk at the end of line */
+ if (end[0] != 0)
+ return -1;
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return -1;
+ ether_addr->addr_bytes[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return -1;
+ ether_addr->addr_bytes[i * 2] =
+ (uint8_t)(o[i] >> 8);
+ ether_addr->addr_bytes[i * 2 + 1] =
+ (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return -1;
+
+ return 0;
+}
+
+static int
+set_policy_mac(struct channel_packet *pkt, int idx, char *mac)
+{
+ union PFID pfid;
+ int ret;
+
+ /* Use port MAC address as the vfid */
+ ret = str_to_ether_addr(mac, &pfid.addr);
+
+ if (ret != 0) {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Invalid mac address received in JSON\n");
+ pkt->vfid[idx] = 0;
+ return -1;
+ }
+
+ printf("Received MAC Address: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
+ "%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
+ pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
+ pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
+ pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);
+
+ pkt->vfid[idx] = pfid.pfid;
+ return 0;
+}
+
+
+static int
+parse_json_to_pkt(json_t *element, struct channel_packet *pkt)
+{
+ const char *key;
+ json_t *value;
+ int ret;
+
+ memset(pkt, 0, sizeof(struct channel_packet));
+
+ pkt->nb_mac_to_monitor = 0;
+ pkt->t_boost_status.tbEnabled = false;
+ pkt->workload = LOW;
+ pkt->policy_to_use = TIME;
+ pkt->command = PKT_POLICY;
+ pkt->core_type = CORE_TYPE_PHYSICAL;
+
+ json_object_foreach(element, key, value) {
+ if (!strcmp(key, "policy")) {
+ /* Recurse in to get the contents of profile */
+ ret = parse_json_to_pkt(value, pkt);
+ if (ret)
+ return ret;
+ } else if (!strcmp(key, "instruction")) {
+ /* Recurse in to get the contents of instruction */
+ ret = parse_json_to_pkt(value, pkt);
+ if (ret)
+ return ret;
+ } else if (!strcmp(key, "name")) {
+ strcpy(pkt->vm_name, json_string_value(value));
+ } else if (!strcmp(key, "command")) {
+ char command[32];
+ snprintf(command, 32, "%s", json_string_value(value));
+ if (!strcmp(command, "power")) {
+ pkt->command = CPU_POWER;
+ } else if (!strcmp(command, "create")) {
+ pkt->command = PKT_POLICY;
+ } else if (!strcmp(command, "destroy")) {
+ pkt->command = PKT_POLICY_REMOVE;
+ } else {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Invalid command received in JSON\n");
+ return -1;
+ }
+ } else if (!strcmp(key, "policy_type")) {
+ char command[32];
+ snprintf(command, 32, "%s", json_string_value(value));
+ if (!strcmp(command, "TIME")) {
+ pkt->policy_to_use = TIME;
+ } else if (!strcmp(command, "TRAFFIC")) {
+ pkt->policy_to_use = TRAFFIC;
+ } else if (!strcmp(command, "WORKLOAD")) {
+ pkt->policy_to_use = WORKLOAD;
+ } else if (!strcmp(command, "BRANCH_RATIO")) {
+ pkt->policy_to_use = BRANCH_RATIO;
+ } else {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Wrong policy_type received in JSON\n");
+ return -1;
+ }
+ } else if (!strcmp(key, "workload")) {
+ char command[32];
+ snprintf(command, 32, "%s", json_string_value(value));
+ if (!strcmp(command, "HIGH")) {
+ pkt->workload = HIGH;
+ } else if (!strcmp(command, "MEDIUM")) {
+ pkt->workload = MEDIUM;
+ } else if (!strcmp(command, "LOW")) {
+ pkt->workload = LOW;
+ } else {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Wrong workload received in JSON\n");
+ return -1;
+ }
+ } else if (!strcmp(key, "busy_hours")) {
+ unsigned int i;
+ size_t size = json_array_size(value);
+
+ for (i = 0; i < size; i++) {
+ int hour = (int)json_integer_value(
+ json_array_get(value, i));
+ pkt->timer_policy.busy_hours[i] = hour;
+ }
+ } else if (!strcmp(key, "quiet_hours")) {
+ unsigned int i;
+ size_t size = json_array_size(value);
+
+ for (i = 0; i < size; i++) {
+ int hour = (int)json_integer_value(
+ json_array_get(value, i));
+ pkt->timer_policy.quiet_hours[i] = hour;
+ }
+ } else if (!strcmp(key, "core_list")) {
+ unsigned int i;
+ size_t size = json_array_size(value);
+
+ for (i = 0; i < size; i++) {
+ int core = (int)json_integer_value(
+ json_array_get(value, i));
+ pkt->vcpu_to_control[i] = core;
+ }
+ pkt->num_vcpu = size;
+ } else if (!strcmp(key, "mac_list")) {
+ unsigned int i;
+ size_t size = json_array_size(value);
+
+ for (i = 0; i < size; i++) {
+ char mac[32];
+ snprintf(mac, 32, "%s", json_string_value(
+ json_array_get(value, i)));
+ set_policy_mac(pkt, i, mac);
+ }
+ pkt->nb_mac_to_monitor = size;
+ } else if (!strcmp(key, "avg_packet_thresh")) {
+ pkt->traffic_policy.avg_max_packet_thresh =
+ (uint32_t)json_integer_value(value);
+ } else if (!strcmp(key, "max_packet_thresh")) {
+ pkt->traffic_policy.max_max_packet_thresh =
+ (uint32_t)json_integer_value(value);
+ } else if (!strcmp(key, "unit")) {
+ char unit[32];
+ snprintf(unit, 32, "%s", json_string_value(value));
+ if (!strcmp(unit, "SCALE_UP")) {
+ pkt->unit = CPU_POWER_SCALE_UP;
+ } else if (!strcmp(unit, "SCALE_DOWN")) {
+ pkt->unit = CPU_POWER_SCALE_DOWN;
+ } else if (!strcmp(unit, "SCALE_MAX")) {
+ pkt->unit = CPU_POWER_SCALE_MAX;
+ } else if (!strcmp(unit, "SCALE_MIN")) {
+ pkt->unit = CPU_POWER_SCALE_MIN;
+ } else if (!strcmp(unit, "ENABLE_TURBO")) {
+ pkt->unit = CPU_POWER_ENABLE_TURBO;
+ } else if (!strcmp(unit, "DISABLE_TURBO")) {
+ pkt->unit = CPU_POWER_DISABLE_TURBO;
+ } else {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Invalid command received in JSON\n");
+ return -1;
+ }
+ } else if (!strcmp(key, "resource_id")) {
+ pkt->resource_id = (uint32_t)json_integer_value(value);
+ } else {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Unknown key received in JSON string: %s\n",
+ key);
+ }
+ }
+ return 0;
+}
+#endif
void channel_monitor_exit(void)
{
@@ -66,7 +303,7 @@ static void
core_share_status(int pNo)
{
- int noVms, noVcpus, z, x, t;
+ int noVms = 0, noVcpus = 0, z, x, t;
get_all_vm(&noVms, &noVcpus);
@@ -85,6 +322,33 @@ core_share_status(int pNo)
}
}
+
+static int
+pcpu_monitor(struct policy *pol, struct core_info *ci, int pcpu, int count)
+{
+ int ret = 0;
+
+ if (pol->pkt.policy_to_use == BRANCH_RATIO) {
+ ci->cd[pcpu].oob_enabled = 1;
+ ret = add_core_to_monitor(pcpu);
+ if (ret == 0)
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Monitoring pcpu %d OOB for %s\n",
+ pcpu, pol->pkt.vm_name);
+ else
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Error monitoring pcpu %d OOB for %s\n",
+ pcpu, pol->pkt.vm_name);
+
+ } else {
+ pol->core_share[count].pcpu = pcpu;
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Monitoring pcpu %d for %s\n",
+ pcpu, pol->pkt.vm_name);
+ }
+ return ret;
+}
+
static void
get_pcpu_to_control(struct policy *pol)
{
@@ -94,34 +358,42 @@ get_pcpu_to_control(struct policy *pol)
int pcpu, count;
uint64_t mask_u64b;
struct core_info *ci;
- int ret;
ci = get_core_info();
- RTE_LOG(INFO, CHANNEL_MONITOR, "Looking for pcpu for %s\n",
- pol->pkt.vm_name);
- get_info_vm(pol->pkt.vm_name, &info);
-
- for (count = 0; count < pol->pkt.num_vcpu; count++) {
- mask_u64b = info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
- for (pcpu = 0; mask_u64b; mask_u64b &= ~(1ULL << pcpu++)) {
- if ((mask_u64b >> pcpu) & 1) {
- if (pol->pkt.policy_to_use == BRANCH_RATIO) {
- ci->cd[pcpu].oob_enabled = 1;
- ret = add_core_to_monitor(pcpu);
- if (ret == 0)
- printf("Monitoring pcpu %d via Branch Ratio\n",
- pcpu);
- else
- printf("Failed to start OOB Monitoring pcpu %d\n",
- pcpu);
-
- } else {
- pol->core_share[count].pcpu = pcpu;
- printf("Monitoring pcpu %d\n", pcpu);
- }
+ RTE_LOG(DEBUG, CHANNEL_MONITOR,
+ "Looking for pcpu for %s\n", pol->pkt.vm_name);
+
+ /*
+ * So now that we're handling virtual and physical cores, we need to
+ * differentiate between them when adding them to the branch monitor.
+ * Virtual cores need to be converted to physical cores.
+ */
+ if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) {
+ /*
+ * If the cores in the policy are virtual, we need to map them
+ * to physical cores. We look up the vm info and use that for
+ * the mapping.
+ */
+ get_info_vm(pol->pkt.vm_name, &info);
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ mask_u64b =
+ info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
+ for (pcpu = 0; mask_u64b;
+ mask_u64b &= ~(1ULL << pcpu++)) {
+ if ((mask_u64b >> pcpu) & 1)
+ pcpu_monitor(pol, ci, pcpu, count);
}
}
+ } else {
+ /*
+ * If the cores in the policy are physical, we just use
+ * those core ids directly.
+ */
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ pcpu = pol->pkt.vcpu_to_control[count];
+ pcpu_monitor(pol, ci, pcpu, count);
+ }
}
}
@@ -160,8 +432,13 @@ update_policy(struct channel_packet *pkt)
unsigned int updated = 0;
int i;
- for (i = 0; i < MAX_VMS; i++) {
+
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Applying policy for %s\n", pkt->vm_name);
+
+ for (i = 0; i < MAX_CLIENTS; i++) {
if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
+ /* Copy the contents of *pkt into the policy.pkt */
policies[i].pkt = *pkt;
get_pcpu_to_control(&policies[i]);
if (get_pfid(&policies[i]) == -1) {
@@ -174,7 +451,7 @@ update_policy(struct channel_packet *pkt)
}
}
if (!updated) {
- for (i = 0; i < MAX_VMS; i++) {
+ for (i = 0; i < MAX_CLIENTS; i++) {
if (policies[i].enabled == 0) {
policies[i].pkt = *pkt;
get_pcpu_to_control(&policies[i]);
@@ -189,6 +466,24 @@ update_policy(struct channel_packet *pkt)
return 0;
}
+static int
+remove_policy(struct channel_packet *pkt __rte_unused)
+{
+ int i;
+
+ /*
+ * Disabling the policy is simply a case of setting
+ * enabled to 0
+ */
+ for (i = 0; i < MAX_CLIENTS; i++) {
+ if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
+ policies[i].enabled = 0;
+ return 0;
+ }
+ }
+ return -1;
+}
+
static uint64_t
get_pkt_diff(struct policy *pol)
{
@@ -233,8 +528,6 @@ apply_traffic_profile(struct policy *pol)
diff = get_pkt_diff(pol);
- RTE_LOG(INFO, CHANNEL_MONITOR, "Applying traffic profile\n");
-
if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
for (count = 0; count < pol->pkt.num_vcpu; count++) {
if (pol->core_share[count].status != 1)
@@ -278,9 +571,6 @@ apply_time_profile(struct policy *pol)
if (pol->core_share[count].status != 1) {
power_manager_scale_core_max(
pol->core_share[count].pcpu);
- RTE_LOG(INFO, CHANNEL_MONITOR,
- "Scaling up core %d to max\n",
- pol->core_share[count].pcpu);
}
}
break;
@@ -290,9 +580,6 @@ apply_time_profile(struct policy *pol)
if (pol->core_share[count].status != 1) {
power_manager_scale_core_min(
pol->core_share[count].pcpu);
- RTE_LOG(INFO, CHANNEL_MONITOR,
- "Scaling down core %d to min\n",
- pol->core_share[count].pcpu);
}
}
break;
@@ -346,7 +633,6 @@ apply_policy(struct policy *pol)
apply_workload_profile(pol);
}
-
static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
@@ -362,10 +648,12 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
if (pkt->command == CPU_POWER) {
core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
if (core_mask == 0) {
- RTE_LOG(ERR, CHANNEL_MONITOR, "Error get physical CPU mask for "
- "channel '%s' using vCPU(%u)\n", chan_info->channel_path,
- (unsigned)pkt->unit);
- return -1;
+ /*
+ * Core mask will be 0 when the hypervisor is
+ * not available and we are working on the host,
+ * so use the core id itself as the mask.
+ */
+ core_mask = 1ULL << pkt->resource_id;
}
if (__builtin_popcountll(core_mask) == 1) {
@@ -421,12 +709,20 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
}
if (pkt->command == PKT_POLICY) {
- RTE_LOG(INFO, CHANNEL_MONITOR, "\nProcessing Policy request from Guest\n");
+ RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n",
+ pkt->vm_name);
update_policy(pkt);
policy_is_set = 1;
}
- /* Return is not checked as channel status may have been set to DISABLED
+ if (pkt->command == PKT_POLICY_REMOVE) {
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Removing policy %s\n", pkt->vm_name);
+ remove_policy(pkt);
+ }
+
+ /*
+ * Return is not checked as channel status may have been set to DISABLED
* from management thread
*/
rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
@@ -448,13 +744,16 @@ add_channel_to_monitor(struct channel_info **chan_info)
"to epoll\n", info->channel_path);
return -1;
}
+ RTE_LOG(ERR, CHANNEL_MONITOR, "Added channel '%s' "
+ "to monitor\n", info->channel_path);
return 0;
}
int
remove_channel_from_monitor(struct channel_info *chan_info)
{
- if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL, chan_info->fd, NULL) < 0) {
+ if (epoll_ctl(global_event_fd, EPOLL_CTL_DEL,
+ chan_info->fd, NULL) < 0) {
RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to remove channel '%s' "
"from epoll\n", chan_info->channel_path);
return -1;
@@ -467,11 +766,13 @@ channel_monitor_init(void)
{
global_event_fd = epoll_create1(0);
if (global_event_fd == 0) {
- RTE_LOG(ERR, CHANNEL_MONITOR, "Error creating epoll context with "
- "error %s\n", strerror(errno));
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Error creating epoll context with error %s\n",
+ strerror(errno));
return -1;
}
- global_events_list = rte_malloc("epoll_events", sizeof(*global_events_list)
+ global_events_list = rte_malloc("epoll_events",
+ sizeof(*global_events_list)
* MAX_EVENTS, RTE_CACHE_LINE_SIZE);
if (global_events_list == NULL) {
RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
@@ -481,6 +782,103 @@ channel_monitor_init(void)
return 0;
}
+static void
+read_binary_packet(struct channel_info *chan_info)
+{
+ struct channel_packet pkt;
+ void *buffer = &pkt;
+ int buffer_len = sizeof(pkt);
+ int n_bytes, err = 0;
+
+ while (buffer_len > 0) {
+ n_bytes = read(chan_info->fd,
+ buffer, buffer_len);
+ if (n_bytes == buffer_len)
+ break;
+ if (n_bytes == -1) {
+ err = errno;
+ RTE_LOG(DEBUG, CHANNEL_MONITOR,
+ "Received error on "
+ "channel '%s' read: %s\n",
+ chan_info->channel_path,
+ strerror(err));
+ remove_channel(&chan_info);
+ break;
+ }
+ buffer = (char *)buffer + n_bytes;
+ buffer_len -= n_bytes;
+ }
+ if (!err)
+ process_request(&pkt, chan_info);
+}
+
+#ifdef USE_JANSSON
+static void
+read_json_packet(struct channel_info *chan_info)
+{
+ struct channel_packet pkt;
+ int n_bytes, ret;
+ json_t *root;
+ json_error_t error;
+
+ /* read opening brace to closing brace */
+ do {
+ int idx = 0;
+ int indent = 0;
+ do {
+ n_bytes = read(chan_info->fd, &json_data[idx], 1);
+ if (n_bytes == 0)
+ break;
+ if (json_data[idx] == '{')
+ indent++;
+ if (json_data[idx] == '}')
+ indent--;
+ if ((indent > 0) || (idx > 0))
+ idx++;
+ if (indent == 0)
+ json_data[idx] = 0;
+ if (idx >= MAX_JSON_STRING_LEN-1)
+ break;
+ } while (indent > 0);
+
+ if (indent > 0)
+ /*
+ * We've broken out of the read loop without getting
+ * a closing brace, so throw away the data
+ */
+ json_data[idx] = 0;
+
+ if (strlen(json_data) == 0)
+ continue;
+
+ printf("got [%s]\n", json_data);
+
+ root = json_loads(json_data, 0, &error);
+
+ if (root) {
+ /*
+ * The raw data is now held in the json
+ * object, so parse it into the
+ * channel_packet struct using
+ * parse_json_to_pkt().
+ */
+ ret = parse_json_to_pkt(root, &pkt);
+ json_decref(root);
+ if (ret) {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "Error validating JSON profile data\n");
+ break;
+ }
+ process_request(&pkt, chan_info);
+ } else {
+ RTE_LOG(ERR, CHANNEL_MONITOR,
+ "JSON error on line %d: %s\n",
+ error.line, error.text);
+ }
+ } while (n_bytes > 0);
+}
+#endif
+
void
run_channel_monitor(void)
{
@@ -496,7 +894,8 @@ run_channel_monitor(void)
global_events_list[i].data.ptr;
if ((global_events_list[i].events & EPOLLERR) ||
(global_events_list[i].events & EPOLLHUP)) {
- RTE_LOG(DEBUG, CHANNEL_MONITOR, "Remote closed connection for "
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Remote closed connection for "
"channel '%s'\n",
chan_info->channel_path);
remove_channel(&chan_info);
@@ -504,38 +903,25 @@ run_channel_monitor(void)
}
if (global_events_list[i].events & EPOLLIN) {
- int n_bytes, err = 0;
- struct channel_packet pkt;
- void *buffer = &pkt;
- int buffer_len = sizeof(pkt);
-
- while (buffer_len > 0) {
- n_bytes = read(chan_info->fd,
- buffer, buffer_len);
- if (n_bytes == buffer_len)
- break;
- if (n_bytes == -1) {
- err = errno;
- RTE_LOG(DEBUG, CHANNEL_MONITOR,
- "Received error on "
- "channel '%s' read: %s\n",
- chan_info->channel_path,
- strerror(err));
- remove_channel(&chan_info);
- break;
- }
- buffer = (char *)buffer + n_bytes;
- buffer_len -= n_bytes;
+ switch (chan_info->type) {
+ case CHANNEL_TYPE_BINARY:
+ read_binary_packet(chan_info);
+ break;
+#ifdef USE_JANSSON
+ case CHANNEL_TYPE_JSON:
+ read_json_packet(chan_info);
+ break;
+#endif
+ default:
+ break;
}
- if (!err)
- process_request(&pkt, chan_info);
}
}
rte_delay_us(time_period_ms*1000);
if (policy_is_set) {
int j;
- for (j = 0; j < MAX_VMS; j++) {
+ for (j = 0; j < MAX_CLIENTS; j++) {
if (policies[j].enabled == 1)
apply_policy(&policies[j]);
}
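
For reference, a host-side client of the new JSON channel only needs to write one brace-delimited JSON document into the fifo; read_json_packet() above reassembles it and parse_json_to_pkt() maps the keys onto a channel_packet. The sketch below is an assumption: the fifo path is a placeholder (the manager builds the real one from CHANNEL_MGR_SOCKET_PATH with "fifo" appended), and the keys simply mirror the ones handled in parse_json_to_pkt().

/*
 * Assumed host-side client of the JSON channel: write one brace-delimited
 * JSON document into the fifo. The path is a placeholder (the manager
 * builds the real one from CHANNEL_MGR_SOCKET_PATH plus "fifo") and the
 * keys mirror those handled by parse_json_to_pkt() above.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	static const char cmd[] =
		"{ \"instruction\": {"
		" \"name\": \"Workload1\","
		" \"command\": \"power\","
		" \"unit\": \"SCALE_MAX\","
		" \"resource_id\": 10"
		" } }";
	FILE *f = fopen("/tmp/powermonitor/fifo", "w"); /* placeholder path */

	if (f == NULL) {
		perror("fopen");
		return 1;
	}
	fwrite(cmd, 1, strlen(cmd), f);
	fclose(f);
	return 0;
}
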
diff --git a/examples/vm_power_manager/guest_cli/meson.build b/examples/vm_power_manager/guest_cli/meson.build
new file mode 100644
index 00000000..9e821ceb
--- /dev/null
+++ b/examples/vm_power_manager/guest_cli/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Set the name explicitly, since the default name would conflict with the
+# vm_power_manager app because of the way the directories are parsed.
+name = 'guest_cli'
+
+deps += ['power']
+
+sources = files(
+ 'main.c', 'parse.c', 'vm_power_cli_guest.c'
+)
+
+opt_dep = cc.find_library('virt', required : false)
+build = opt_dep.found()
+ext_deps += opt_dep
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 0db1b804..2d9e7689 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -92,6 +92,7 @@ set_policy_defaults(struct channel_packet *pkt)
pkt->timer_policy.hours_to_use_traffic_profile[0] = 8;
pkt->timer_policy.hours_to_use_traffic_profile[1] = 10;
+ pkt->core_type = CORE_TYPE_VIRTUAL;
pkt->workload = LOW;
pkt->policy_to_use = TIME;
pkt->command = PKT_POLICY;
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index 58c5fa45..893bf4cd 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -421,6 +421,8 @@ main(int argc, char **argv)
return -1;
}
+ add_host_channel();
+
printf("Running core monitor on lcore id %d\n", lcore_id);
rte_eal_remote_launch(run_core_monitor, NULL, lcore_id);
diff --git a/examples/vm_power_manager/meson.build b/examples/vm_power_manager/meson.build
index c370d747..f98445bc 100644
--- a/examples/vm_power_manager/meson.build
+++ b/examples/vm_power_manager/meson.build
@@ -6,5 +6,38 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
-# Example app currently unsupported by meson build
-build = false
+if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD')
+ deps += ['pmd_bnxt']
+endif
+
+if dpdk_conf.has('RTE_LIBRTE_I40E_PMD')
+ deps += ['pmd_i40e']
+endif
+
+if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD')
+ deps += ['pmd_ixgbe']
+endif
+
+deps += ['power']
+
+
+sources = files(
+ 'channel_manager.c', 'channel_monitor.c', 'main.c', 'parse.c', 'power_manager.c', 'vm_power_cli.c'
+)
+
+# If we're on X86, pull in the x86 code for the branch monitor algo.
+if dpdk_conf.has('RTE_ARCH_X86_64')
+ sources += files('oob_monitor_x86.c')
+else
+ sources += files('oob_monitor_nop.c')
+endif
+
+opt_dep = cc.find_library('virt', required : false)
+build = opt_dep.found()
+ext_deps += opt_dep
+
+opt_dep = dependency('jansson', required : false)
+if opt_dep.found()
+ ext_deps += opt_dep
+ cflags += '-DUSE_JANSSON'
+endif