Diffstat (limited to 'app')
 app/meson.build                            |  11
 app/pdump/main.c                           |   6
 app/pdump/meson.build                      |   2
 app/proc-info/meson.build                  |   2
 app/test-bbdev/meson.build                 |   2
 app/test-bbdev/test_bbdev_perf.c           |  14
 app/test-crypto-perf/cperf_test_vectors.c  |  22
 app/test-crypto-perf/main.c                |  12
 app/test-crypto-perf/meson.build           |   2
 app/test-eventdev/test_perf_common.c       |   1
 app/test-eventdev/test_pipeline_atq.c      | 287
 app/test-eventdev/test_pipeline_common.c   | 220
 app/test-eventdev/test_pipeline_common.h   |  70
 app/test-eventdev/test_pipeline_queue.c    | 260
 app/test-pmd/Makefile                      |   4
 app/test-pmd/cmdline.c                     | 984
 app/test-pmd/cmdline_flow.c                | 870
 app/test-pmd/cmdline_mtr.c                 |   6
 app/test-pmd/cmdline_tm.c                  | 260
 app/test-pmd/cmdline_tm.h                  |   3
 app/test-pmd/config.c                      | 652
 app/test-pmd/csumonly.c                    |  18
 app/test-pmd/meson.build                   |   6
 app/test-pmd/noisy_vnf.c                   | 279
 app/test-pmd/parameters.c                  | 107
 app/test-pmd/rxonly.c                      | 138
 app/test-pmd/testpmd.c                     | 779
 app/test-pmd/testpmd.h                     | 129
 app/test-pmd/util.c                        | 219
 29 files changed, 4127 insertions(+), 1238 deletions(-)
diff --git a/app/meson.build b/app/meson.build
index 99e0b93e..a9a026bb 100644
--- a/app/meson.build
+++ b/app/meson.build
@@ -11,20 +11,25 @@ apps = ['pdump',
# for BSD only
lib_execinfo = cc.find_library('execinfo', required: false)
+default_cflags = machine_args
+
+# specify -D_GNU_SOURCE unconditionally
+default_cflags += '-D_GNU_SOURCE'
+
foreach app:apps
build = true
name = app
allow_experimental_apis = false
sources = []
includes = []
- cflags = machine_args
+ cflags = default_cflags
objs = [] # other object files to link against, used e.g. for
# instruction-set optimized versions of code
# use "deps" for internal DPDK dependencies, and "ext_deps" for
# external package/library requirements
ext_deps = []
- deps = []
+ deps = dpdk_app_link_libraries
subdir(name)
@@ -38,7 +43,7 @@ foreach app:apps
link_libs = []
if get_option('default_library') == 'static'
- link_libs = dpdk_drivers
+ link_libs = dpdk_static_libraries + dpdk_drivers
endif
if allow_experimental_apis
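
The hunk above also lets the per-object "CFLAGS_cmdline.o := -D_GNU_SOURCE" workaround be dropped from app/test-pmd/Makefile later in this diff. As a minimal illustration of why the define matters (an assumption about glibc behavior, not part of the patch): strcasestr() is a GNU extension that is only declared when _GNU_SOURCE is defined before the first include.

#define _GNU_SOURCE		/* must precede the first #include */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Without -D_GNU_SOURCE (or the define above), glibc hides
	 * strcasestr() and the build fails with an implicit
	 * declaration diagnostic.
	 */
	const char *hit = strcasestr("TestPMD CmdLine", "cmdline");

	printf("%s\n", hit ? hit : "not found");
	return 0;
}
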
diff --git a/app/pdump/main.c b/app/pdump/main.c
index ac228712..d96556ec 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -81,7 +81,7 @@ enum pdump_by {
DEVICE_ID = 2
};
-const char *valid_pdump_arguments[] = {
+static const char * const valid_pdump_arguments[] = {
PDUMP_PORT_ARG,
PDUMP_PCI_ARG,
PDUMP_QUEUE_ARG,
@@ -136,9 +136,9 @@ struct parse_val {
uint64_t val;
};
-int num_tuples;
+static int num_tuples;
static struct rte_eth_conf port_conf_default;
-volatile uint8_t quit_signal;
+static volatile uint8_t quit_signal;
/**< display usage */
static void
diff --git a/app/pdump/meson.build b/app/pdump/meson.build
index 988cb4eb..116c27f0 100644
--- a/app/pdump/meson.build
+++ b/app/pdump/meson.build
@@ -3,4 +3,4 @@
sources = files('main.c')
allow_experimental_apis = true
-deps = ['ethdev', 'kvargs', 'pdump']
+deps += ['ethdev', 'kvargs', 'pdump']
diff --git a/app/proc-info/meson.build b/app/proc-info/meson.build
index 9c148e36..a52b2ee4 100644
--- a/app/proc-info/meson.build
+++ b/app/proc-info/meson.build
@@ -3,4 +3,4 @@
sources = files('main.c')
allow_experimental_apis = true
-deps = ['ethdev', 'metrics']
+deps += ['ethdev', 'metrics']
diff --git a/app/test-bbdev/meson.build b/app/test-bbdev/meson.build
index 653907de..eb8cc049 100644
--- a/app/test-bbdev/meson.build
+++ b/app/test-bbdev/meson.build
@@ -6,4 +6,4 @@ sources = files('main.c',
'test_bbdev_perf.c',
'test_bbdev_vector.c')
allow_experimental_apis = true
-deps = ['bbdev', 'bus_vdev']
+deps += ['bbdev', 'bus_vdev']
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 6861edc4..fbe6cc91 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -267,12 +267,13 @@ create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
static int
create_mempools(struct active_device *ad, int socket_id,
- enum rte_bbdev_op_type op_type, uint16_t num_ops)
+ enum rte_bbdev_op_type org_op_type, uint16_t num_ops)
{
struct rte_mempool *mp;
unsigned int ops_pool_size, mbuf_pool_size = 0;
char pool_name[RTE_MEMPOOL_NAMESIZE];
const char *op_type_str;
+ enum rte_bbdev_op_type op_type = org_op_type;
struct op_data_entries *in = &test_vector.entries[DATA_INPUT];
struct op_data_entries *hard_out =
@@ -289,6 +290,9 @@ create_mempools(struct active_device *ad, int socket_id,
OPS_CACHE_SIZE + 1)),
OPS_POOL_SIZE_MIN));
+ if (org_op_type == RTE_BBDEV_OP_NONE)
+ op_type = RTE_BBDEV_OP_TURBO_ENC;
+
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
@@ -303,6 +307,10 @@ create_mempools(struct active_device *ad, int socket_id,
socket_id);
ad->ops_mempool = mp;
+ /* Do not create inputs and outputs mbufs for BaseBand Null Device */
+ if (org_op_type == RTE_BBDEV_OP_NONE)
+ return TEST_SUCCESS;
+
/* Inputs */
mbuf_pool_size = optimal_mempool_size(ops_pool_size * in->nb_segments);
mp = create_mbuf_pool(in, ad->dev_id, socket_id, mbuf_pool_size, "in");
@@ -1058,14 +1066,14 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
rte_bbdev_info_get(ad->dev_id, &info);
socket_id = GET_SOCKET(info.socket_id);
- if (op_type == RTE_BBDEV_OP_NONE)
- op_type = RTE_BBDEV_OP_TURBO_ENC;
f_ret = create_mempools(ad, socket_id, op_type,
get_num_ops());
if (f_ret != TEST_SUCCESS) {
printf("Couldn't create mempools");
goto fail;
}
+ if (op_type == RTE_BBDEV_OP_NONE)
+ op_type = RTE_BBDEV_OP_TURBO_ENC;
f_ret = init_test_op_params(op_params, test_vector.op_type,
test_vector.expected_status,
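
A condensed sketch of the control flow the hunks above establish: a test vector of type RTE_BBDEV_OP_NONE borrows the RTE_BBDEV_OP_TURBO_ENC sizing for the ops mempool but skips the data mbuf pools entirely, and the caller now maps NONE to TURBO_ENC only after the mempools exist. Here create_ops_pool() and create_data_pools() are hypothetical stand-ins for the inline pool-creation code, not functions from the patch.

static int
create_mempools_sketch(struct active_device *ad, int socket_id,
		enum rte_bbdev_op_type org_op_type, uint16_t num_ops)
{
	/* NONE has no op type of its own: borrow TURBO_ENC sizing */
	enum rte_bbdev_op_type op_type =
		(org_op_type == RTE_BBDEV_OP_NONE) ?
		RTE_BBDEV_OP_TURBO_ENC : org_op_type;

	create_ops_pool(ad, socket_id, op_type, num_ops);

	/* BaseBand Null Device moves no data: skip in/out mbuf pools */
	if (org_op_type == RTE_BBDEV_OP_NONE)
		return TEST_SUCCESS;

	return create_data_pools(ad, socket_id, num_ops);
}
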
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 907a995c..1af95249 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -419,13 +419,19 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
t_vec->cipher_key.length = 0;
t_vec->ciphertext.data = plaintext;
t_vec->cipher_key.data = NULL;
- t_vec->cipher_iv.data = NULL;
} else {
t_vec->cipher_key.length = options->cipher_key_sz;
t_vec->ciphertext.data = ciphertext;
t_vec->cipher_key.data = cipher_key;
- t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
- 16);
+ }
+
+ /* Init IV data ptr */
+ t_vec->cipher_iv.data = NULL;
+
+ if (options->cipher_iv_sz != 0) {
+ /* Set IV parameters */
+ t_vec->cipher_iv.data = rte_malloc(NULL,
+ options->cipher_iv_sz, 16);
if (t_vec->cipher_iv.data == NULL) {
rte_free(t_vec);
return NULL;
@@ -433,17 +439,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
}
t_vec->ciphertext.length = options->max_buffer_size;
-
- /* Set IV parameters */
- t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
- 16);
- if (options->cipher_iv_sz && t_vec->cipher_iv.data == NULL) {
- rte_free(t_vec);
- return NULL;
- }
- memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
t_vec->cipher_iv.length = options->cipher_iv_sz;
-
t_vec->data.cipher_offset = 0;
t_vec->data.cipher_length = options->max_buffer_size;
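
Consolidating the interleaved hunks, the post-patch IV handling in cperf_test_vector_get_dummy() reduces to a single guarded allocation (reconstructed from the diff above, no new logic):

	/* IV is optional: allocate it only when the algorithm uses one */
	t_vec->cipher_iv.data = NULL;
	if (options->cipher_iv_sz != 0) {
		t_vec->cipher_iv.data = rte_malloc(NULL,
				options->cipher_iv_sz, 16);
		if (t_vec->cipher_iv.data == NULL) {
			rte_free(t_vec);
			return NULL;
		}
		memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
	}
	t_vec->cipher_iv.length = options->cipher_iv_sz;
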
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 5c7dadb6..953e058c 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -342,7 +342,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->cipher_iv.data == NULL)
+ /* Cipher IV is only required for some algorithms */
+ if (opts->cipher_iv_sz &&
+ test_vec->cipher_iv.data == NULL)
return -1;
if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
@@ -357,7 +359,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
- if (test_vec->auth_key.data == NULL)
+ /* Auth key is only required for some algorithms */
+ if (opts->auth_key_sz &&
+ test_vec->auth_key.data == NULL)
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
@@ -421,6 +425,10 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
+ if (test_vec->aead_key.data == NULL)
+ return -1;
+ if (test_vec->aead_key.length != opts->aead_key_sz)
+ return -1;
if (test_vec->aead_iv.data == NULL)
return -1;
if (test_vec->aead_iv.length != opts->aead_iv_sz)
diff --git a/app/test-crypto-perf/meson.build b/app/test-crypto-perf/meson.build
index eacd7a0f..d735b186 100644
--- a/app/test-crypto-perf/meson.build
+++ b/app/test-crypto-perf/meson.build
@@ -12,4 +12,4 @@ sources = files('cperf_ops.c',
'cperf_test_vectors.c',
'cperf_test_verify.c',
'main.c')
-deps = ['cryptodev']
+deps += ['cryptodev']
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index d33cb2cd..86187753 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -680,7 +680,6 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 26dc79f9..c60635bf 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -15,10 +15,10 @@ pipeline_atq_nb_event_queues(struct evt_options *opt)
return rte_eth_dev_count_avail();
}
-static int
+static __rte_noinline int
pipeline_atq_worker_single_stage_tx(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_INIT;
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -28,23 +28,18 @@ pipeline_atq_worker_single_stage_tx(void *arg)
continue;
}
- if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- pipeline_tx_pkt(ev.mbuf);
- w->processed_pkts++;
- continue;
- }
- pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
}
return 0;
}
-static int
+static __rte_noinline int
pipeline_atq_worker_single_stage_fwd(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_INIT;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -54,19 +49,19 @@ pipeline_atq_worker_single_stage_fwd(void *arg)
continue;
}
- w->processed_pkts++;
- ev.queue_id = tx_queue;
+ ev.queue_id = tx_queue[ev.mbuf->port];
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
pipeline_event_enqueue(dev, port, &ev);
+ w->processed_pkts++;
}
return 0;
}
-static int
+static __rte_noinline int
pipeline_atq_worker_single_stage_burst_tx(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -79,27 +74,21 @@ pipeline_atq_worker_single_stage_burst_tx(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
- if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev[i].mbuf);
- ev[i].op = RTE_EVENT_OP_RELEASE;
- w->processed_pkts++;
- } else
- pipeline_fwd_event(&ev[i],
- RTE_SCHED_TYPE_ATOMIC);
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ pipeline_event_tx_burst(dev, port, ev, nb_rx);
+ w->processed_pkts += nb_rx;
}
return 0;
}
-static int
+static __rte_noinline int
pipeline_atq_worker_single_stage_burst_fwd(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -112,23 +101,22 @@ pipeline_atq_worker_single_stage_burst_fwd(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
- ev[i].queue_id = tx_queue;
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
- w->processed_pkts++;
}
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ w->processed_pkts += nb_rx;
}
return 0;
}
-static int
+static __rte_noinline int
pipeline_atq_worker_multi_stage_tx(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_INIT;
- const uint8_t nb_stages = t->opt->nb_stages;
-
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -141,29 +129,24 @@ pipeline_atq_worker_multi_stage_tx(void *arg)
cq_id = ev.sub_event_type % nb_stages;
if (cq_id == last_queue) {
- if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev.mbuf);
- w->processed_pkts++;
- continue;
- }
- pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- } else {
- ev.sub_event_type++;
- pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
+ continue;
}
+ ev.sub_event_type++;
+ pipeline_fwd_event(&ev, sched_type_list[cq_id]);
pipeline_event_enqueue(dev, port, &ev);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_atq_worker_multi_stage_fwd(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_INIT;
- const uint8_t nb_stages = t->opt->nb_stages;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -176,9 +159,9 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
cq_id = ev.sub_event_type % nb_stages;
if (cq_id == last_queue) {
- w->processed_pkts++;
- ev.queue_id = tx_queue;
+ ev.queue_id = tx_queue[ev.mbuf->port];
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ w->processed_pkts++;
} else {
ev.sub_event_type++;
pipeline_fwd_event(&ev, sched_type_list[cq_id]);
@@ -186,14 +169,14 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
pipeline_event_enqueue(dev, port, &ev);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_atq_worker_multi_stage_burst_tx(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
- const uint8_t nb_stages = t->opt->nb_stages;
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -209,34 +192,27 @@ pipeline_atq_worker_multi_stage_burst_tx(void *arg)
cq_id = ev[i].sub_event_type % nb_stages;
if (cq_id == last_queue) {
- if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev[i].mbuf);
- ev[i].op = RTE_EVENT_OP_RELEASE;
- w->processed_pkts++;
- continue;
- }
-
- pipeline_fwd_event(&ev[i],
- RTE_SCHED_TYPE_ATOMIC);
- } else {
- ev[i].sub_event_type++;
- pipeline_fwd_event(&ev[i],
- sched_type_list[cq_id]);
+ pipeline_event_tx(dev, port, &ev[i]);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ continue;
}
+
+ ev[i].sub_event_type++;
+ pipeline_fwd_event(&ev[i], sched_type_list[cq_id]);
}
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
- const uint8_t nb_stages = t->opt->nb_stages;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -253,7 +229,7 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
if (cq_id == last_queue) {
w->processed_pkts++;
- ev[i].queue_id = tx_queue;
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
pipeline_fwd_event(&ev[i],
RTE_SCHED_TYPE_ATOMIC);
} else {
@@ -265,6 +241,7 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+
return 0;
}
@@ -274,39 +251,36 @@ worker_wrapper(void *arg)
struct worker_data *w = arg;
struct evt_options *opt = w->t->opt;
const bool burst = evt_has_burst_mode(w->dev_id);
- const bool mt_safe = !w->t->mt_unsafe;
+ const bool internal_port = w->t->internal_port;
const uint8_t nb_stages = opt->nb_stages;
RTE_SET_USED(opt);
if (nb_stages == 1) {
- if (!burst && mt_safe)
+ if (!burst && internal_port)
return pipeline_atq_worker_single_stage_tx(arg);
- else if (!burst && !mt_safe)
+ else if (!burst && !internal_port)
return pipeline_atq_worker_single_stage_fwd(arg);
- else if (burst && mt_safe)
+ else if (burst && internal_port)
return pipeline_atq_worker_single_stage_burst_tx(arg);
- else if (burst && !mt_safe)
+ else if (burst && !internal_port)
return pipeline_atq_worker_single_stage_burst_fwd(arg);
} else {
- if (!burst && mt_safe)
+ if (!burst && internal_port)
return pipeline_atq_worker_multi_stage_tx(arg);
- else if (!burst && !mt_safe)
+ else if (!burst && !internal_port)
return pipeline_atq_worker_multi_stage_fwd(arg);
- if (burst && mt_safe)
+ if (burst && internal_port)
return pipeline_atq_worker_multi_stage_burst_tx(arg);
- else if (burst && !mt_safe)
+ else if (burst && !internal_port)
return pipeline_atq_worker_multi_stage_burst_fwd(arg);
}
+
rte_panic("invalid worker\n");
}
static int
pipeline_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
- struct test_pipeline *t = evt_test_priv(test);
-
- if (t->mt_unsafe)
- rte_service_component_runstate_set(t->tx_service.service_id, 1);
return pipeline_launch_lcores(test, opt, worker_wrapper);
}
@@ -317,34 +291,38 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
int nb_ports;
int nb_queues;
uint8_t queue;
- struct rte_event_dev_info info;
- struct test_pipeline *t = evt_test_priv(test);
- uint8_t tx_evqueue_id = 0;
+ uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint8_t nb_worker_queues = 0;
+ uint8_t tx_evport_id = 0;
+ uint16_t prod = 0;
+ struct rte_event_dev_info info;
+ struct test_pipeline *t = evt_test_priv(test);
nb_ports = evt_nr_active_lcores(opt->wlcores);
nb_queues = rte_eth_dev_count_avail();
- /* One extra port and queueu for Tx service */
- if (t->mt_unsafe) {
- tx_evqueue_id = nb_queues;
- nb_ports++;
- nb_queues++;
+ memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
+ memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ /* One queue for Tx adapter per port */
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ tx_evqueue_id[prod] = nb_queues;
+ nb_queues++;
+ }
}
-
rte_event_dev_info_get(opt->dev_id, &info);
const struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = info.max_num_events,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth =
- info.max_event_port_dequeue_depth,
- .nb_event_port_enqueue_depth =
- info.max_event_port_enqueue_depth,
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = info.max_num_events,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth =
+ info.max_event_port_dequeue_depth,
+ .nb_event_port_enqueue_depth =
+ info.max_event_port_enqueue_depth,
};
ret = rte_event_dev_configure(opt->dev_id, &config);
if (ret) {
@@ -353,21 +331,23 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
struct rte_event_queue_conf q_conf = {
- .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .nb_atomic_flows = opt->nb_flows,
- .nb_atomic_order_sequences = opt->nb_flows,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
};
/* queue configurations */
for (queue = 0; queue < nb_queues; queue++) {
q_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
- if (t->mt_unsafe) {
- if (queue == tx_evqueue_id) {
- q_conf.event_queue_cfg =
- RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
- } else {
- queue_arr[nb_worker_queues] = queue;
- nb_worker_queues++;
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ if (queue == tx_evqueue_id[prod]) {
+ q_conf.event_queue_cfg =
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+ } else {
+ queue_arr[nb_worker_queues] = queue;
+ nb_worker_queues++;
+ }
}
}
@@ -383,20 +363,15 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
/* port configuration */
const struct rte_event_port_conf p_conf = {
- .dequeue_depth = opt->wkr_deq_dep,
- .enqueue_depth = info.max_event_port_dequeue_depth,
- .new_event_threshold = info.max_num_events,
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = info.max_event_port_dequeue_depth,
+ .new_event_threshold = info.max_num_events,
};
- if (t->mt_unsafe) {
+ if (!t->internal_port)
ret = pipeline_event_port_setup(test, opt, queue_arr,
nb_worker_queues, p_conf);
- if (ret)
- return ret;
-
- ret = pipeline_event_tx_service_setup(test, opt, tx_evqueue_id,
- nb_ports - 1, p_conf);
- } else
+ else
ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
p_conf);
@@ -408,32 +383,34 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
*
* eth_dev_count = 2, nb_stages = 2, atq mode
*
- * Multi thread safe :
+ * eth0, eth1 have Internal port capability :
* queues = 2
* stride = 1
*
* event queue pipelines:
- * eth0 -> q0 ->tx
- * eth1 -> q1 ->tx
+ * eth0 -> q0 ->Tx
+ * eth1 -> q1 ->Tx
*
* q0, q1 are configured as ATQ so, all the different stages can
* be enqueued on the same queue.
*
- * Multi thread unsafe :
- * queues = 3
+ * eth0, eth1 use Tx adapters service core :
+ * queues = 4
* stride = 1
*
* event queue pipelines:
- * eth0 -> q0
- * } (q3->tx) Tx service
- * eth1 -> q1
+ * eth0 -> q0 -> q2 -> Tx
+ * eth1 -> q1 -> q3 -> Tx
*
- * q0,q1 are configured as stated above.
- * q3 configured as SINGLE_LINK|ATOMIC.
+ * q0, q1 are configured as stated above.
+ * q2, q3 configured as SINGLE_LINK.
*/
ret = pipeline_event_rx_adapter_setup(opt, 1, p_conf);
if (ret)
return ret;
+ ret = pipeline_event_tx_adapter_setup(opt, p_conf);
+ if (ret)
+ return ret;
if (!evt_has_distributed_sched(opt->dev_id)) {
uint32_t service_id;
@@ -445,12 +422,58 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
+ /* Connect the tx_evqueue_id to the Tx adapter port */
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_tx_adapter_event_port_get(prod,
+ &tx_evport_id);
+ if (ret) {
+ evt_err("Unable to get Tx adapter[%d]", prod);
+ return ret;
+ }
+
+ if (rte_event_port_link(opt->dev_id, tx_evport_id,
+ &tx_evqueue_id[prod],
+ NULL, 1) != 1) {
+ evt_err("Unable to link Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return ret;
+ }
+ }
+ }
+
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start."
+ " Using synthetic producer", prod);
+ return ret;
+ }
+ }
+
ret = rte_event_dev_start(opt->dev_id);
if (ret) {
evt_err("failed to start eventdev %d", opt->dev_id);
return ret;
}
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_tx_adapter_start(prod);
+ if (ret) {
+ evt_err("Tx adapter[%d] start failed", prod);
+ return ret;
+ }
+ }
+
+ memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
+ RTE_MAX_ETHPORTS);
+
return 0;
}
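
Taken together, the atq setup above replaces the Tx service core with the generic event eth Tx adapter. A condensed, hypothetical sketch of the bring-up order the diff establishes (the function name and trimmed error handling are assumptions; the adapter id is the ethdev port id, as in the patch):

#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

static void
tx_adapter_bringup(uint8_t dev_id, uint16_t eth_port,
		struct rte_event_port_conf *port_conf,
		uint8_t *tx_evqueue_id)
{
	uint8_t tx_evport_id;
	uint32_t cap = 0;

	rte_event_eth_tx_adapter_caps_get(dev_id, eth_port, &cap);
	rte_event_eth_tx_adapter_create(eth_port, dev_id, port_conf);
	/* queue id -1: add every Tx queue of the ethdev */
	rte_event_eth_tx_adapter_queue_add(eth_port, eth_port, -1);

	/* Without an internal port the adapter owns an event port that
	 * must be linked to the SINGLE_LINK queue reserved per ethdev.
	 */
	if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
		rte_event_eth_tx_adapter_event_port_get(eth_port,
							&tx_evport_id);
		rte_event_port_link(dev_id, tx_evport_id,
				    &tx_evqueue_id[eth_port], NULL, 1);
	}

	rte_eth_dev_start(eth_port);	/* ethdevs first */
	rte_event_dev_start(dev_id);	/* then the event device */
	rte_event_eth_rx_adapter_start(eth_port);
	rte_event_eth_tx_adapter_start(eth_port);	/* adapters last */
}
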
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index a54068df..d07fa882 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -5,58 +5,6 @@
#include "test_pipeline_common.h"
-static int32_t
-pipeline_event_tx_burst_service_func(void *args)
-{
-
- int i;
- struct tx_service_data *tx = args;
- const uint8_t dev = tx->dev_id;
- const uint8_t port = tx->port_id;
- struct rte_event ev[BURST_SIZE + 1];
-
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
-
- if (!nb_rx) {
- for (i = 0; i < tx->nb_ethports; i++)
- rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]);
- return 0;
- }
-
- for (i = 0; i < nb_rx; i++) {
- struct rte_mbuf *m = ev[i].mbuf;
- rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m);
- }
- tx->processed_pkts += nb_rx;
-
- return 0;
-}
-
-static int32_t
-pipeline_event_tx_service_func(void *args)
-{
-
- int i;
- struct tx_service_data *tx = args;
- const uint8_t dev = tx->dev_id;
- const uint8_t port = tx->port_id;
- struct rte_event ev;
-
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
-
- if (!nb_rx) {
- for (i = 0; i < tx->nb_ethports; i++)
- rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]);
- return 0;
- }
-
- struct rte_mbuf *m = ev.mbuf;
- rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m);
- tx->processed_pkts++;
-
- return 0;
-}
-
int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
@@ -65,12 +13,12 @@ pipeline_test_result(struct evt_test *test, struct evt_options *opt)
uint64_t total = 0;
struct test_pipeline *t = evt_test_priv(test);
- printf("Packet distribution across worker cores :\n");
+ evt_info("Packet distribution across worker cores :");
for (i = 0; i < t->nb_workers; i++)
total += t->worker[i].processed_pkts;
for (i = 0; i < t->nb_workers; i++)
- printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
- CLGRN" %3.2f\n"CLNRM, i,
+ evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
+ CLGRN" %3.2f"CLNRM, i,
t->worker[i].processed_pkts,
(((double)t->worker[i].processed_pkts)/total)
* 100);
@@ -97,11 +45,8 @@ processed_pkts(struct test_pipeline *t)
uint64_t total = 0;
rte_smp_rmb();
- if (t->mt_unsafe)
- total = t->tx_service.processed_pkts;
- else
- for (i = 0; i < t->nb_workers; i++)
- total += t->worker[i].processed_pkts;
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
return total;
}
@@ -215,14 +160,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
uint16_t i;
uint8_t nb_queues = 1;
- uint8_t mt_state = 0;
struct test_pipeline *t = evt_test_priv(test);
struct rte_eth_rxconf rx_conf;
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
@@ -234,17 +177,21 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
RTE_SET_USED(opt);
if (!rte_eth_dev_count_avail()) {
- evt_err("No ethernet ports found.\n");
+ evt_err("No ethernet ports found.");
return -ENODEV;
}
+ t->internal_port = 1;
RTE_ETH_FOREACH_DEV(i) {
struct rte_eth_dev_info dev_info;
struct rte_eth_conf local_port_conf = port_conf;
+ uint32_t caps = 0;
+
+ rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+ if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+ t->internal_port = 0;
rte_eth_dev_info_get(i, &dev_info);
- mt_state = !(dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_MT_LOCKFREE);
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;
@@ -253,7 +200,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
port_conf.rx_adv_conf.rss_conf.rss_hf) {
evt_info("Port %u modified RSS hash function based on hardware support,"
- "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ "requested:%#"PRIx64" configured:%#"PRIx64"",
i,
port_conf.rx_adv_conf.rss_conf.rss_hf,
local_port_conf.rx_adv_conf.rss_conf.rss_hf);
@@ -262,28 +209,23 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
if (rte_eth_dev_configure(i, nb_queues, nb_queues,
&local_port_conf)
< 0) {
- evt_err("Failed to configure eth port [%d]\n", i);
+ evt_err("Failed to configure eth port [%d]", i);
return -EINVAL;
}
if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
rte_socket_id(), &rx_conf, t->pool) < 0) {
- evt_err("Failed to setup eth port [%d] rx_queue: %d.\n",
+ evt_err("Failed to setup eth port [%d] rx_queue: %d.",
i, 0);
return -EINVAL;
}
if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
rte_socket_id(), NULL) < 0) {
- evt_err("Failed to setup eth port [%d] tx_queue: %d.\n",
+ evt_err("Failed to setup eth port [%d] tx_queue: %d.",
i, 0);
return -EINVAL;
}
- t->mt_unsafe |= mt_state;
- t->tx_service.tx_buf[i] =
- rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0);
- if (t->tx_service.tx_buf[i] == NULL)
- rte_panic("Unable to allocate Tx buffer memory.");
rte_eth_promiscuous_enable(i);
}
@@ -295,7 +237,6 @@ pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t *queue_arr, uint8_t nb_queues,
const struct rte_event_port_conf p_conf)
{
- int i;
int ret;
uint8_t port;
struct test_pipeline *t = evt_test_priv(test);
@@ -316,23 +257,15 @@ pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
return ret;
}
- if (queue_arr == NULL) {
- if (rte_event_port_link(opt->dev_id, port, NULL, NULL,
- 0) != nb_queues)
- goto link_fail;
- } else {
- for (i = 0; i < nb_queues; i++) {
- if (rte_event_port_link(opt->dev_id, port,
- &queue_arr[i], NULL, 1) != 1)
- goto link_fail;
- }
- }
+ if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
+ nb_queues) != nb_queues)
+ goto link_fail;
}
return 0;
link_fail:
- evt_err("failed to link all queues to port %d", port);
+ evt_err("failed to link queues to port %d", port);
return -EINVAL;
}
@@ -380,85 +313,69 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
ret = evt_service_setup(service_id);
if (ret) {
evt_err("Failed to setup service core"
- " for Rx adapter\n");
+ " for Rx adapter");
return ret;
}
}
- ret = rte_eth_dev_start(prod);
- if (ret) {
- evt_err("Ethernet dev [%d] failed to start."
- " Using synthetic producer", prod);
- return ret;
- }
-
- ret = rte_event_eth_rx_adapter_start(prod);
- if (ret) {
- evt_err("Rx adapter[%d] start failed", prod);
- return ret;
- }
- printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
- prod, prod);
+ evt_info("Port[%d] using Rx adapter[%d] configured", prod,
+ prod);
}
return ret;
}
int
-pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt,
- uint8_t tx_queue_id, uint8_t tx_port_id,
- const struct rte_event_port_conf p_conf)
+pipeline_event_tx_adapter_setup(struct evt_options *opt,
+ struct rte_event_port_conf port_conf)
{
int ret;
- struct rte_service_spec serv;
- struct test_pipeline *t = evt_test_priv(test);
- struct tx_service_data *tx = &t->tx_service;
+ uint16_t consm;
- ret = rte_event_port_setup(opt->dev_id, tx_port_id, &p_conf);
- if (ret) {
- evt_err("failed to setup port %d", tx_port_id);
- return ret;
- }
+ RTE_ETH_FOREACH_DEV(consm) {
+ uint32_t cap;
- if (rte_event_port_link(opt->dev_id, tx_port_id, &tx_queue_id,
- NULL, 1) != 1) {
- evt_err("failed to link queues to port %d", tx_port_id);
- return -EINVAL;
- }
+ ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
+ consm, &cap);
+ if (ret) {
+ evt_err("failed to get event tx adapter[%d] caps",
+ consm);
+ return ret;
+ }
- tx->dev_id = opt->dev_id;
- tx->queue_id = tx_queue_id;
- tx->port_id = tx_port_id;
- tx->nb_ethports = rte_eth_dev_count_avail();
- tx->t = t;
-
- /* Register Tx service */
- memset(&serv, 0, sizeof(struct rte_service_spec));
- snprintf(serv.name, sizeof(serv.name), "Tx_service");
-
- if (evt_has_burst_mode(opt->dev_id))
- serv.callback = pipeline_event_tx_burst_service_func;
- else
- serv.callback = pipeline_event_tx_service_func;
-
- serv.callback_userdata = (void *)tx;
- ret = rte_service_component_register(&serv, &tx->service_id);
- if (ret) {
- evt_err("failed to register Tx service");
- return ret;
- }
+ ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
+ &port_conf);
+ if (ret) {
+ evt_err("failed to create tx adapter[%d]", consm);
+ return ret;
+ }
- ret = evt_service_setup(tx->service_id);
- if (ret) {
- evt_err("Failed to setup service core for Tx service\n");
- return ret;
- }
+ ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
+ if (ret) {
+ evt_err("failed to add tx queues to adapter[%d]",
+ consm);
+ return ret;
+ }
- rte_service_runstate_set(tx->service_id, 1);
+ if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id;
- return 0;
-}
+ rte_event_eth_tx_adapter_service_id_get(consm,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for Tx adapter\n");
+ return ret;
+ }
+ }
+
+ evt_info("Port[%d] using Tx adapter[%d] Configured", consm,
+ consm);
+ }
+ return ret;
+}
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
@@ -466,16 +383,10 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
uint16_t i;
RTE_SET_USED(test);
RTE_SET_USED(opt);
- struct test_pipeline *t = evt_test_priv(test);
-
- if (t->mt_unsafe) {
- rte_service_component_runstate_set(t->tx_service.service_id, 0);
- rte_service_runstate_set(t->tx_service.service_id, 0);
- rte_service_component_unregister(t->tx_service.service_id);
- }
RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
+ rte_event_eth_tx_adapter_stop(i);
rte_eth_dev_stop(i);
}
}
@@ -485,7 +396,6 @@ pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
RTE_SET_USED(test);
- rte_event_dev_stop(opt->dev_id);
rte_event_dev_close(opt->dev_id);
}
diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index 5fb91607..0440b9e2 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -14,6 +14,7 @@
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
@@ -35,52 +36,41 @@ struct worker_data {
struct test_pipeline *t;
} __rte_cache_aligned;
-struct tx_service_data {
- uint8_t dev_id;
- uint8_t queue_id;
- uint8_t port_id;
- uint32_t service_id;
- uint64_t processed_pkts;
- uint16_t nb_ethports;
- struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
- struct test_pipeline *t;
-} __rte_cache_aligned;
-
struct test_pipeline {
/* Don't change the offset of "done". Signal handler use this memory
* to terminate all lcores work.
*/
int done;
uint8_t nb_workers;
- uint8_t mt_unsafe;
+ uint8_t internal_port;
+ uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
enum evt_test_result result;
uint32_t nb_flows;
uint64_t outstand_pkts;
struct rte_mempool *pool;
struct worker_data worker[EVT_MAX_PORTS];
- struct tx_service_data tx_service;
struct evt_options *opt;
uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
} __rte_cache_aligned;
#define BURST_SIZE 16
-#define PIPELINE_WROKER_SINGLE_STAGE_INIT \
+#define PIPELINE_WORKER_SINGLE_STAGE_INIT \
struct worker_data *w = arg; \
struct test_pipeline *t = w->t; \
const uint8_t dev = w->dev_id; \
const uint8_t port = w->port_id; \
- struct rte_event ev
+ struct rte_event ev __rte_cache_aligned
-#define PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT \
+#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
int i; \
struct worker_data *w = arg; \
struct test_pipeline *t = w->t; \
const uint8_t dev = w->dev_id; \
const uint8_t port = w->port_id; \
- struct rte_event ev[BURST_SIZE + 1]
+ struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
-#define PIPELINE_WROKER_MULTI_STAGE_INIT \
+#define PIPELINE_WORKER_MULTI_STAGE_INIT \
struct worker_data *w = arg; \
struct test_pipeline *t = w->t; \
uint8_t cq_id; \
@@ -88,10 +78,11 @@ struct test_pipeline {
const uint8_t port = w->port_id; \
const uint8_t last_queue = t->opt->nb_stages - 1; \
uint8_t *const sched_type_list = &t->sched_type_list[0]; \
- struct rte_event ev
+ const uint8_t nb_stages = t->opt->nb_stages + 1; \
+ struct rte_event ev __rte_cache_aligned
-#define PIPELINE_WROKER_MULTI_STAGE_BURST_INIT \
- int i; \
+#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT \
+ int i; \
struct worker_data *w = arg; \
struct test_pipeline *t = w->t; \
uint8_t cq_id; \
@@ -99,7 +90,8 @@ struct test_pipeline {
const uint8_t port = w->port_id; \
const uint8_t last_queue = t->opt->nb_stages - 1; \
uint8_t *const sched_type_list = &t->sched_type_list[0]; \
- struct rte_event ev[BURST_SIZE + 1]
+ const uint8_t nb_stages = t->opt->nb_stages + 1; \
+ struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
static __rte_always_inline void
pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
@@ -110,6 +102,28 @@ pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
}
static __rte_always_inline void
+pipeline_event_tx(const uint8_t dev, const uint8_t port,
+ struct rte_event * const ev)
+{
+ rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
+ while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1))
+ rte_pause();
+}
+
+static __rte_always_inline void
+pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev, const uint16_t nb_rx)
+{
+ uint16_t enq;
+
+ enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_eth_tx_adapter_enqueue(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+}
+
+static __rte_always_inline void
pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
struct rte_event *ev)
{
@@ -130,13 +144,6 @@ pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
}
}
-static __rte_always_inline void
-pipeline_tx_pkt(struct rte_mbuf *mbuf)
-{
- while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
- rte_pause();
-}
-
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
@@ -149,9 +156,8 @@ int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
struct rte_event_port_conf prod_conf);
-int pipeline_event_tx_service_setup(struct evt_test *test,
- struct evt_options *opt, uint8_t tx_queue_id,
- uint8_t tx_port_id, const struct rte_event_port_conf p_conf);
+int pipeline_event_tx_adapter_setup(struct evt_options *opt,
+ struct rte_event_port_conf prod_conf);
int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t *queue_arr, uint8_t nb_queues,
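
The new inline helpers above replace pipeline_tx_pkt() with Tx-adapter enqueues. A minimal worker loop built on them, mirroring pipeline_atq_worker_single_stage_tx from this diff (assumes test_pipeline_common.h is included; the function name is illustrative):

static int
worker_tx_loop(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port,
							 &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}
		/* pipeline_event_tx() sets Tx queue 0 on the mbuf and
		 * retries rte_event_eth_tx_adapter_enqueue() until the
		 * event is accepted.
		 */
		pipeline_event_tx(dev, port, &ev);
		w->processed_pkts++;
	}
	return 0;
}
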
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index ca5f4578..25217008 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -15,10 +15,10 @@ pipeline_queue_nb_event_queues(struct evt_options *opt)
return (eth_count * opt->nb_stages) + eth_count;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_INIT;
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -29,7 +29,7 @@ pipeline_queue_worker_single_stage_tx(void *arg)
}
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- pipeline_tx_pkt(ev.mbuf);
+ pipeline_event_tx(dev, port, &ev);
w->processed_pkts++;
} else {
ev.queue_id++;
@@ -41,11 +41,11 @@ pipeline_queue_worker_single_stage_tx(void *arg)
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_fwd(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_INIT;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -55,7 +55,8 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
continue;
}
- ev.queue_id = tx_queue;
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
pipeline_event_enqueue(dev, port, &ev);
w->processed_pkts++;
@@ -64,10 +65,10 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -81,8 +82,7 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev[i].mbuf);
+ pipeline_event_tx(dev, port, &ev[i]);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
} else {
@@ -98,11 +98,11 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
- PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -115,23 +115,24 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
- ev[i].queue_id = tx_queue;
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
- w->processed_pkts++;
}
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ w->processed_pkts += nb_rx;
}
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_tx(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -143,31 +144,27 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
cq_id = ev.queue_id % nb_stages;
- if (cq_id >= last_queue) {
- if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev.mbuf);
- w->processed_pkts++;
- continue;
- }
- ev.queue_id += (cq_id == last_queue) ? 1 : 0;
- pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- } else {
- ev.queue_id++;
- pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ if (ev.queue_id == tx_queue[ev.mbuf->port]) {
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
+ continue;
}
+ ev.queue_id++;
+ pipeline_fwd_event(&ev, cq_id != last_queue ?
+ sched_type_list[cq_id] :
+ RTE_SCHED_TYPE_ATOMIC);
pipeline_event_enqueue(dev, port, &ev);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_fwd(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -180,7 +177,8 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
cq_id = ev.queue_id % nb_stages;
if (cq_id == last_queue) {
- ev.queue_id = tx_queue;
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
w->processed_pkts++;
} else {
@@ -190,14 +188,15 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
pipeline_event_enqueue(dev, port, &ev);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -212,37 +211,30 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
rte_prefetch0(ev[i + 1].mbuf);
cq_id = ev[i].queue_id % nb_stages;
- if (cq_id >= last_queue) {
- if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
-
- pipeline_tx_pkt(ev[i].mbuf);
- ev[i].op = RTE_EVENT_OP_RELEASE;
- w->processed_pkts++;
- continue;
- }
-
- ev[i].queue_id += (cq_id == last_queue) ? 1 : 0;
- pipeline_fwd_event(&ev[i],
- RTE_SCHED_TYPE_ATOMIC);
- } else {
- ev[i].queue_id++;
- pipeline_fwd_event(&ev[i],
- sched_type_list[cq_id]);
+ if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
+ pipeline_event_tx(dev, port, &ev[i]);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ continue;
}
+ ev[i].queue_id++;
+ pipeline_fwd_event(&ev[i], cq_id != last_queue ?
+ sched_type_list[cq_id] :
+ RTE_SCHED_TYPE_ATOMIC);
}
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+
return 0;
}
-static int
+static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
- PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
- const uint8_t nb_stages = t->opt->nb_stages + 1;
- const uint8_t tx_queue = t->tx_service.queue_id;
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
while (t->done == false) {
uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
@@ -258,7 +250,8 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
cq_id = ev[i].queue_id % nb_stages;
if (cq_id == last_queue) {
- ev[i].queue_id = tx_queue;
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
pipeline_fwd_event(&ev[i],
RTE_SCHED_TYPE_ATOMIC);
w->processed_pkts++;
@@ -271,6 +264,7 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+
return 0;
}
@@ -280,28 +274,28 @@ worker_wrapper(void *arg)
struct worker_data *w = arg;
struct evt_options *opt = w->t->opt;
const bool burst = evt_has_burst_mode(w->dev_id);
- const bool mt_safe = !w->t->mt_unsafe;
+ const bool internal_port = w->t->internal_port;
const uint8_t nb_stages = opt->nb_stages;
RTE_SET_USED(opt);
if (nb_stages == 1) {
- if (!burst && mt_safe)
+ if (!burst && internal_port)
return pipeline_queue_worker_single_stage_tx(arg);
- else if (!burst && !mt_safe)
+ else if (!burst && !internal_port)
return pipeline_queue_worker_single_stage_fwd(arg);
- else if (burst && mt_safe)
+ else if (burst && internal_port)
return pipeline_queue_worker_single_stage_burst_tx(arg);
- else if (burst && !mt_safe)
+ else if (burst && !internal_port)
return pipeline_queue_worker_single_stage_burst_fwd(
arg);
} else {
- if (!burst && mt_safe)
+ if (!burst && internal_port)
return pipeline_queue_worker_multi_stage_tx(arg);
- else if (!burst && !mt_safe)
+ else if (!burst && !internal_port)
return pipeline_queue_worker_multi_stage_fwd(arg);
- else if (burst && mt_safe)
+ else if (burst && internal_port)
return pipeline_queue_worker_multi_stage_burst_tx(arg);
- else if (burst && !mt_safe)
+ else if (burst && !internal_port)
return pipeline_queue_worker_multi_stage_burst_fwd(arg);
}
@@ -311,10 +305,6 @@ worker_wrapper(void *arg)
static int
pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
- struct test_pipeline *t = evt_test_priv(test);
-
- if (t->mt_unsafe)
- rte_service_component_runstate_set(t->tx_service.service_id, 1);
return pipeline_launch_lcores(test, opt, worker_wrapper);
}
@@ -326,25 +316,24 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
int nb_queues;
int nb_stages = opt->nb_stages;
uint8_t queue;
- struct rte_event_dev_info info;
- struct test_pipeline *t = evt_test_priv(test);
- uint8_t tx_evqueue_id = 0;
+ uint8_t tx_evport_id = 0;
+ uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint8_t nb_worker_queues = 0;
+ uint16_t prod = 0;
+ struct rte_event_dev_info info;
+ struct test_pipeline *t = evt_test_priv(test);
nb_ports = evt_nr_active_lcores(opt->wlcores);
nb_queues = rte_eth_dev_count_avail() * (nb_stages);
- /* Extra port for Tx service. */
- if (t->mt_unsafe) {
- tx_evqueue_id = nb_queues;
- nb_ports++;
- nb_queues++;
- } else
- nb_queues += rte_eth_dev_count_avail();
+ /* One queue for Tx adapter per port */
+ nb_queues += rte_eth_dev_count_avail();
- rte_event_dev_info_get(opt->dev_id, &info);
+ memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
+ memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ rte_event_dev_info_get(opt->dev_id, &info);
const struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
@@ -370,24 +359,19 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
for (queue = 0; queue < nb_queues; queue++) {
uint8_t slot;
- if (!t->mt_unsafe) {
- slot = queue % (nb_stages + 1);
- q_conf.schedule_type = slot == nb_stages ?
- RTE_SCHED_TYPE_ATOMIC :
- opt->sched_type_list[slot];
- } else {
- slot = queue % nb_stages;
-
- if (queue == tx_evqueue_id) {
- q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ q_conf.event_queue_cfg = 0;
+ slot = queue % (nb_stages + 1);
+ if (slot == nb_stages) {
+ q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ if (!t->internal_port) {
q_conf.event_queue_cfg =
RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
- } else {
- q_conf.schedule_type =
- opt->sched_type_list[slot];
- queue_arr[nb_worker_queues] = queue;
- nb_worker_queues++;
}
+ tx_evqueue_id[prod++] = queue;
+ } else {
+ q_conf.schedule_type = opt->sched_type_list[slot];
+ queue_arr[nb_worker_queues] = queue;
+ nb_worker_queues++;
}
ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
@@ -407,19 +391,11 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
.new_event_threshold = info.max_num_events,
};
- /*
- * If tx is multi thread safe then allow workers to do Tx else use Tx
- * service to Tx packets.
- */
- if (t->mt_unsafe) {
+ if (!t->internal_port) {
ret = pipeline_event_port_setup(test, opt, queue_arr,
nb_worker_queues, p_conf);
if (ret)
return ret;
-
- ret = pipeline_event_tx_service_setup(test, opt, tx_evqueue_id,
- nb_ports - 1, p_conf);
-
} else
ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
p_conf);
@@ -431,7 +407,6 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
*
* eth_dev_count = 2, nb_stages = 2.
*
- * Multi thread safe :
* queues = 6
* stride = 3
*
@@ -439,21 +414,14 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
* eth0 -> q0 -> q1 -> (q2->tx)
* eth1 -> q3 -> q4 -> (q5->tx)
*
- * q2, q5 configured as ATOMIC
- *
- * Multi thread unsafe :
- * queues = 5
- * stride = 2
- *
- * event queue pipelines:
- * eth0 -> q0 -> q1
- * } (q4->tx) Tx service
- * eth1 -> q2 -> q3
+ * q2, q5 configured as ATOMIC | SINGLE_LINK
*
- * q4 configured as SINGLE_LINK|ATOMIC
*/
- ret = pipeline_event_rx_adapter_setup(opt,
- t->mt_unsafe ? nb_stages : nb_stages + 1, p_conf);
+ ret = pipeline_event_rx_adapter_setup(opt, nb_stages + 1, p_conf);
+ if (ret)
+ return ret;
+
+ ret = pipeline_event_tx_adapter_setup(opt, p_conf);
if (ret)
return ret;
@@ -467,12 +435,60 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
+ /* Connect the tx_evqueue_id to the Tx adapter port */
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_tx_adapter_event_port_get(prod,
+ &tx_evport_id);
+ if (ret) {
+ evt_err("Unable to get Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return ret;
+ }
+
+ if (rte_event_port_link(opt->dev_id, tx_evport_id,
+ &tx_evqueue_id[prod],
+ NULL, 1) != 1) {
+ evt_err("Unable to link Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return ret;
+ }
+ }
+ }
+
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start."
+ " Using synthetic producer", prod);
+ return ret;
+ }
+
+ }
+
ret = rte_event_dev_start(opt->dev_id);
if (ret) {
evt_err("failed to start eventdev %d", opt->dev_id);
return ret;
}
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_tx_adapter_start(prod);
+ if (ret) {
+ evt_err("Tx adapter[%d] start failed", prod);
+ return ret;
+ }
+ }
+
+ memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
+ RTE_MAX_ETHPORTS);
+
return 0;
}
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index 2b4d604b..d5258eae 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -33,8 +33,10 @@ SRCS-y += rxonly.c
SRCS-y += txonly.c
SRCS-y += csumonly.c
SRCS-y += icmpecho.c
+SRCS-y += noisy_vnf.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_cmd.c
+SRCS-y += util.c
ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC), y)
SRCS-y += softnicfwd.c
@@ -70,8 +72,6 @@ endif
endif
-CFLAGS_cmdline.o := -D_GNU_SOURCE
-
include $(RTE_SDK)/mk/rte.app.mk
endif
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 589121d6..1050fde9 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -167,7 +167,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"Display:\n"
"--------\n\n"
- "show port (info|stats|xstats|fdir|stat_qmap|dcb_tc|cap) (port_id|all)\n"
+ "show port (info|stats|summary|xstats|fdir|stat_qmap|dcb_tc|cap) (port_id|all)\n"
" Display information for port_id, or all.\n\n"
"show port X rss reta (size) (mask0,mask1,...)\n"
@@ -175,11 +175,8 @@ static void cmd_help_long_parsed(void *parsed_result,
" by masks on port X. size is used to indicate the"
" hardware supported reta size\n\n"
- "show port rss-hash ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
- "ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|"
- "ipv6-other|l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex [key]\n"
- " Display the RSS hash functions and RSS hash key"
- " of port X\n\n"
+ "show port (port_id) rss-hash [key]\n"
+ " Display the RSS hash functions and RSS hash key of port\n\n"
"clear port (info|stats|xstats|fdir|stat_qmap) (port_id|all)\n"
" Clear information for port_id, or all.\n\n"
@@ -283,6 +280,9 @@ static void cmd_help_long_parsed(void *parsed_result,
"set portlist (x[,y]*)\n"
" Set the list of forwarding ports.\n\n"
+ "set port setup on (iterator|event)\n"
+ " Select how attached port is retrieved for setup.\n\n"
+
"set tx loopback (port_id) (on|off)\n"
" Enable or disable tx loopback.\n\n"
@@ -397,12 +397,13 @@ static void cmd_help_long_parsed(void *parsed_result,
" Disable hardware insertion of a VLAN header in"
" packets sent on a port.\n\n"
- "csum set (ip|udp|tcp|sctp|outer-ip) (hw|sw) (port_id)\n"
+ "csum set (ip|udp|tcp|sctp|outer-ip|outer-udp) (hw|sw) (port_id)\n"
" Select hardware or software calculation of the"
" checksum when transmitting a packet using the"
" csum forward engine.\n"
" ip|udp|tcp|sctp always concern the inner layer.\n"
" outer-ip concerns the outer IP layer in"
+ " outer-udp concerns the outer UDP layer in"
" case the packet is recognized as a tunnel packet by"
" the forward engine (vxlan, gre and ipip are supported)\n"
" Please check the NIC datasheet for HW limits.\n\n"
@@ -883,6 +884,10 @@ static void cmd_help_long_parsed(void *parsed_result,
" Start/stop a rx/tx queue of port X. Only take effect"
" when port X is started\n\n"
+ "port (port_id) (rxq|txq) (queue_id) deferred_start (on|off)\n"
+ " Switch on/off a deferred start of port X rx/tx queue. Only"
+ " take effect when port X is stopped.\n\n"
+
"port (port_id) (rxq|txq) (queue_id) setup\n"
" Setup a rx/tx queue of port X.\n\n"
@@ -1247,6 +1252,59 @@ cmdline_parse_inst_t cmd_operate_specific_port = {
},
};
+/* *** enable port setup (after attach) via iterator or event *** */
+struct cmd_set_port_setup_on_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t setup;
+ cmdline_fixed_string_t on;
+ cmdline_fixed_string_t mode;
+};
+
+static void cmd_set_port_setup_on_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_setup_on_result *res = parsed_result;
+
+ if (strcmp(res->mode, "event") == 0)
+ setup_on_probe_event = true;
+ else if (strcmp(res->mode, "iterator") == 0)
+ setup_on_probe_event = false;
+ else
+ printf("Unknown mode\n");
+}
+
+cmdline_parse_token_string_t cmd_set_port_setup_on_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_set_port_setup_on_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_set_port_setup_on_setup =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result,
+ setup, "setup");
+cmdline_parse_token_string_t cmd_set_port_setup_on_on =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result,
+ on, "on");
+cmdline_parse_token_string_t cmd_set_port_setup_on_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result,
+ mode, "iterator#event");
+
+cmdline_parse_inst_t cmd_set_port_setup_on = {
+ .f = cmd_set_port_setup_on_parsed,
+ .data = NULL,
+ .help_str = "set port setup on iterator|event",
+ .tokens = {
+ (void *)&cmd_set_port_setup_on_set,
+ (void *)&cmd_set_port_setup_on_port,
+ (void *)&cmd_set_port_setup_on_setup,
+ (void *)&cmd_set_port_setup_on_on,
+ (void *)&cmd_set_port_setup_on_mode,
+ NULL,
+ },
+};
+
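
For context: "iterator" walks matching ports after attach, while "event" sets
ports up from the probe notification. A minimal sketch of the event-driven
variant against the ethdev API (illustrative only, not part of this patch):

	#include <stdio.h>
	#include <rte_common.h>
	#include <rte_ethdev.h>

	static int
	on_new_port(uint16_t port_id, enum rte_eth_event_type event,
		    void *cb_arg, void *ret_param)
	{
		RTE_SET_USED(event);
		RTE_SET_USED(cb_arg);
		RTE_SET_USED(ret_param);
		printf("port %u probed, running setup\n", port_id);
		return 0;
	}

	static void
	enable_event_setup(void)
	{
		/* RTE_ETH_ALL registers for current and future ports. */
		rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					      on_new_port, NULL);
	}
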
/* *** attach a specified port *** */
struct cmd_operate_attach_port_result {
cmdline_fixed_string_t port;
@@ -1303,7 +1361,7 @@ static void cmd_operate_detach_port_parsed(void *parsed_result,
struct cmd_operate_detach_port_result *res = parsed_result;
if (!strcmp(res->keyword, "detach"))
- detach_port(res->port_id);
+ detach_port_device(res->port_id);
else
printf("Unknown parameter\n");
}
@@ -1898,11 +1956,9 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
rx_offloads = port->dev_conf.rxmode.offloads;
if (!strcmp(res->name, "crc-strip")) {
if (!strcmp(res->value, "on")) {
- rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
rx_offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
} else if (!strcmp(res->value, "off")) {
rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
- rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
} else {
printf("Unknown parameter\n");
return;
@@ -2441,6 +2497,92 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
},
};
+/* *** configure port rxq/txq deferred start on/off *** */
+struct cmd_config_deferred_start_rxtx_queue {
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t rxtxq;
+ uint16_t qid;
+ cmdline_fixed_string_t opname;
+ cmdline_fixed_string_t state;
+};
+
+static void
+cmd_config_deferred_start_rxtx_queue_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_deferred_start_rxtx_queue *res = parsed_result;
+ struct rte_port *port;
+ uint8_t isrx;
+ uint8_t ison;
+ uint8_t needreconfig = 0;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ if (port_is_started(res->port_id) != 0) {
+ printf("Please stop port %u first\n", res->port_id);
+ return;
+ }
+
+ port = &ports[res->port_id];
+
+ isrx = !strcmp(res->rxtxq, "rxq");
+
+ if (isrx && rx_queue_id_is_invalid(res->qid))
+ return;
+ else if (!isrx && tx_queue_id_is_invalid(res->qid))
+ return;
+
+ ison = !strcmp(res->state, "on");
+
+ if (isrx && port->rx_conf[res->qid].rx_deferred_start != ison) {
+ port->rx_conf[res->qid].rx_deferred_start = ison;
+ needreconfig = 1;
+ } else if (!isrx && port->tx_conf[res->qid].tx_deferred_start != ison) {
+ port->tx_conf[res->qid].tx_deferred_start = ison;
+ needreconfig = 1;
+ }
+
+ if (needreconfig)
+ cmd_reconfig_device_queue(res->port_id, 0, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_deferred_start_rxtx_queue_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue,
+ port, "port");
+cmdline_parse_token_num_t cmd_config_deferred_start_rxtx_queue_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_deferred_start_rxtx_queue_rxtxq =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue,
+ rxtxq, "rxq#txq");
+cmdline_parse_token_num_t cmd_config_deferred_start_rxtx_queue_qid =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue,
+ qid, UINT16);
+cmdline_parse_token_string_t cmd_config_deferred_start_rxtx_queue_opname =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue,
+ opname, "deferred_start");
+cmdline_parse_token_string_t cmd_config_deferred_start_rxtx_queue_state =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue,
+ state, "on#off");
+
+cmdline_parse_inst_t cmd_config_deferred_start_rxtx_queue = {
+ .f = cmd_config_deferred_start_rxtx_queue_parsed,
+ .data = NULL,
+ .help_str = "port <port_id> rxq|txq <queue_id> deferred_start on|off",
+ .tokens = {
+ (void *)&cmd_config_deferred_start_rxtx_queue_port,
+ (void *)&cmd_config_deferred_start_rxtx_queue_port_id,
+ (void *)&cmd_config_deferred_start_rxtx_queue_rxtxq,
+ (void *)&cmd_config_deferred_start_rxtx_queue_qid,
+ (void *)&cmd_config_deferred_start_rxtx_queue_opname,
+ (void *)&cmd_config_deferred_start_rxtx_queue_state,
+ NULL,
+ },
+};
+
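
At the ethdev level, deferred start maps onto the rx_deferred_start and
tx_deferred_start queue-config bits: such a queue is configured but left
stopped by rte_eth_dev_start() and started explicitly later. A hedged sketch
(error handling omitted; port_id, queue_id, nb_rxd, socket_id, mbuf_pool and
dev_info are assumed to be set up already):

	struct rte_eth_rxconf rxq_conf = dev_info.default_rxconf;

	rxq_conf.rx_deferred_start = 1;
	rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd, socket_id,
			       &rxq_conf, mbuf_pool);
	rte_eth_dev_start(port_id);	/* deferred queue stays stopped */
	rte_eth_dev_rx_queue_start(port_id, queue_id);	/* start on demand */
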
/* *** configure port rxq/txq setup *** */
struct cmd_setup_rxtx_queue {
cmdline_fixed_string_t port;
@@ -2816,8 +2958,7 @@ static void cmd_showport_rss_hash_parsed(void *parsed_result,
{
struct cmd_showport_rss_hash *res = parsed_result;
- port_rss_hash_conf_show(res->port_id, res->rss_type,
- show_rss_key != NULL);
+ port_rss_hash_conf_show(res->port_id, show_rss_key != NULL);
}
cmdline_parse_token_string_t cmd_showport_rss_hash_show =
@@ -2829,28 +2970,18 @@ cmdline_parse_token_num_t cmd_showport_rss_hash_port_id =
cmdline_parse_token_string_t cmd_showport_rss_hash_rss_hash =
TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, rss_hash,
"rss-hash");
-cmdline_parse_token_string_t cmd_showport_rss_hash_rss_hash_info =
- TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, rss_type,
- "ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#"
- "ipv4-other#ipv6#ipv6-frag#ipv6-tcp#ipv6-udp#"
- "ipv6-sctp#ipv6-other#l2-payload#ipv6-ex#"
- "ipv6-tcp-ex#ipv6-udp-ex");
cmdline_parse_token_string_t cmd_showport_rss_hash_rss_key =
TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, key, "key");
cmdline_parse_inst_t cmd_showport_rss_hash = {
.f = cmd_showport_rss_hash_parsed,
.data = NULL,
- .help_str = "show port <port_id> rss-hash "
- "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
- "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
- "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex",
+ .help_str = "show port <port_id> rss-hash",
.tokens = {
(void *)&cmd_showport_rss_hash_show,
(void *)&cmd_showport_rss_hash_port,
(void *)&cmd_showport_rss_hash_port_id,
(void *)&cmd_showport_rss_hash_rss_hash,
- (void *)&cmd_showport_rss_hash_rss_hash_info,
NULL,
},
};
@@ -2858,16 +2989,12 @@ cmdline_parse_inst_t cmd_showport_rss_hash = {
cmdline_parse_inst_t cmd_showport_rss_hash_key = {
.f = cmd_showport_rss_hash_parsed,
.data = (void *)1,
- .help_str = "show port <port_id> rss-hash "
- "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
- "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
- "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex key",
+ .help_str = "show port <port_id> rss-hash key",
.tokens = {
(void *)&cmd_showport_rss_hash_show,
(void *)&cmd_showport_rss_hash_port,
(void *)&cmd_showport_rss_hash_port_id,
(void *)&cmd_showport_rss_hash_rss_hash,
- (void *)&cmd_showport_rss_hash_rss_hash_info,
(void *)&cmd_showport_rss_hash_rss_key,
NULL,
},
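
With the per-type argument gone, the helper now simply queries the full RSS
configuration; roughly the underlying ethdev call (PRIx64 needs <inttypes.h>):

	uint8_t key[64];
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,			/* NULL here would skip the key */
		.rss_key_len = sizeof(key),
	};

	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
		printf("RSS functions: 0x%" PRIx64 "\n", rss_conf.rss_hf);
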
@@ -4089,6 +4216,8 @@ csum_show(int port_id)
(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
printf("Outer-Ip checksum offload is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
+ printf("Outer-Udp checksum offload is %s\n",
+ (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
/* display warnings if configuration is not supported by the NIC */
rte_eth_dev_info_get(port_id, &dev_info);
@@ -4117,6 +4246,12 @@ csum_show(int port_id)
printf("Warning: hardware outer IP checksum enabled but not "
"supported by port %d\n", port_id);
}
+ if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
+ (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ == 0) {
+ printf("Warning: hardware outer UDP checksum enabled but not "
+ "supported by port %d\n", port_id);
+ }
}
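
The same capability check applies when enabling the new offload from an
application; a minimal sketch (port_conf is an assumed rte_eth_conf):

	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
	else
		printf("outer UDP checksum not offloadable on port %u\n",
		       port_id);
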
static void
@@ -4185,6 +4320,15 @@ cmd_csum_parsed(void *parsed_result,
printf("Outer IP checksum offload is not "
"supported by port %u\n", res->port_id);
}
+ } else if (!strcmp(res->proto, "outer-udp")) {
+ if (hw == 0 || (dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
+ csum_offloads |=
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ } else {
+ printf("Outer UDP checksum offload is not "
+ "supported by port %u\n", res->port_id);
+ }
}
if (hw) {
@@ -4208,7 +4352,7 @@ cmdline_parse_token_string_t cmd_csum_mode =
mode, "set");
cmdline_parse_token_string_t cmd_csum_proto =
TOKEN_STRING_INITIALIZER(struct cmd_csum_result,
- proto, "ip#tcp#udp#sctp#outer-ip");
+ proto, "ip#tcp#udp#sctp#outer-ip#outer-udp");
cmdline_parse_token_string_t cmd_csum_hwsw =
TOKEN_STRING_INITIALIZER(struct cmd_csum_result,
hwsw, "hw#sw");
@@ -4219,7 +4363,7 @@ cmdline_parse_token_num_t cmd_csum_portid =
cmdline_parse_inst_t cmd_csum_set = {
.f = cmd_csum_parsed,
.data = NULL,
- .help_str = "csum set ip|tcp|udp|sctp|outer-ip hw|sw <port_id>: "
+ .help_str = "csum set ip|tcp|udp|sctp|outer-ip|outer-udp hw|sw <port_id>: "
"Enable/Disable hardware calculation of L3/L4 checksum when "
"using csum forward engine",
.tokens = {
@@ -4279,7 +4423,7 @@ cmdline_parse_token_string_t cmd_csum_tunnel_csum =
csum, "csum");
cmdline_parse_token_string_t cmd_csum_tunnel_parse =
TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result,
- parse, "parse_tunnel");
+ parse, "parse-tunnel");
cmdline_parse_token_string_t cmd_csum_tunnel_onoff =
TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result,
onoff, "on#off");
@@ -4290,7 +4434,7 @@ cmdline_parse_token_num_t cmd_csum_tunnel_portid =
cmdline_parse_inst_t cmd_csum_tunnel = {
.f = cmd_csum_tunnel_parsed,
.data = NULL,
- .help_str = "csum parse_tunnel on|off <port_id>: "
+ .help_str = "csum parse-tunnel on|off <port_id>: "
"Enable/Disable parsing of tunnels for csum engine",
.tokens = {
(void *)&cmd_csum_tunnel_csum,
@@ -7073,6 +7217,11 @@ static void cmd_showportall_parsed(void *parsed_result,
} else if (!strcmp(res->what, "info"))
RTE_ETH_FOREACH_DEV(i)
port_infos_display(i);
+ else if (!strcmp(res->what, "summary")) {
+ port_summary_header_display();
+ RTE_ETH_FOREACH_DEV(i)
+ port_summary_display(i);
+ }
else if (!strcmp(res->what, "stats"))
RTE_ETH_FOREACH_DEV(i)
nic_stats_display(i);
@@ -7100,14 +7249,14 @@ cmdline_parse_token_string_t cmd_showportall_port =
TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, port, "port");
cmdline_parse_token_string_t cmd_showportall_what =
TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, what,
- "info#stats#xstats#fdir#stat_qmap#dcb_tc#cap");
+ "info#summary#stats#xstats#fdir#stat_qmap#dcb_tc#cap");
cmdline_parse_token_string_t cmd_showportall_all =
TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, all, "all");
cmdline_parse_inst_t cmd_showportall = {
.f = cmd_showportall_parsed,
.data = NULL,
.help_str = "show|clear port "
- "info|stats|xstats|fdir|stat_qmap|dcb_tc|cap all",
+ "info|summary|stats|xstats|fdir|stat_qmap|dcb_tc|cap all",
.tokens = {
(void *)&cmd_showportall_show,
(void *)&cmd_showportall_port,
@@ -7137,6 +7286,10 @@ static void cmd_showport_parsed(void *parsed_result,
nic_xstats_clear(res->portnum);
} else if (!strcmp(res->what, "info"))
port_infos_display(res->portnum);
+ else if (!strcmp(res->what, "summary")) {
+ port_summary_header_display();
+ port_summary_display(res->portnum);
+ }
else if (!strcmp(res->what, "stats"))
nic_stats_display(res->portnum);
else if (!strcmp(res->what, "xstats"))
@@ -7158,7 +7311,7 @@ cmdline_parse_token_string_t cmd_showport_port =
TOKEN_STRING_INITIALIZER(struct cmd_showport_result, port, "port");
cmdline_parse_token_string_t cmd_showport_what =
TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what,
- "info#stats#xstats#fdir#stat_qmap#dcb_tc#cap");
+ "info#summary#stats#xstats#fdir#stat_qmap#dcb_tc#cap");
cmdline_parse_token_num_t cmd_showport_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, UINT16);
@@ -7166,7 +7319,7 @@ cmdline_parse_inst_t cmd_showport = {
.f = cmd_showport_parsed,
.data = NULL,
.help_str = "show|clear port "
- "info|stats|xstats|fdir|stat_qmap|dcb_tc|cap "
+ "info|summary|stats|xstats|fdir|stat_qmap|dcb_tc|cap "
"<port_id>",
.tokens = {
(void *)&cmd_showport_show,
@@ -7573,7 +7726,6 @@ static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
struct cmdline *cl,
__attribute__((unused)) void *data)
{
- pmd_test_exit();
cmdline_quit(cl);
}
@@ -15186,6 +15338,631 @@ cmdline_parse_inst_t cmd_set_nvgre_with_vlan = {
},
};
+/** Set L2 encapsulation details */
+struct cmd_set_l2_encap_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t l2_encap;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t vlan_present:1;
+ uint16_t tci;
+ struct ether_addr eth_src;
+ struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_l2_encap_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, set, "set");
+cmdline_parse_token_string_t cmd_set_l2_encap_l2_encap =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, l2_encap, "l2_encap");
+cmdline_parse_token_string_t cmd_set_l2_encap_l2_encap_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, l2_encap,
+ "l2_encap-with-vlan");
+cmdline_parse_token_string_t cmd_set_l2_encap_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token,
+ "ip-version");
+cmdline_parse_token_string_t cmd_set_l2_encap_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, ip_version,
+ "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_l2_encap_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token,
+ "vlan-tci");
+cmdline_parse_token_num_t cmd_set_l2_encap_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_l2_encap_result, tci, UINT16);
+cmdline_parse_token_string_t cmd_set_l2_encap_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token,
+ "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_l2_encap_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_l2_encap_result, eth_src);
+cmdline_parse_token_string_t cmd_set_l2_encap_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token,
+ "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_l2_encap_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_l2_encap_result, eth_dst);
+
+static void cmd_set_l2_encap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_l2_encap_result *res = parsed_result;
+
+ if (strcmp(res->l2_encap, "l2_encap") == 0)
+ l2_encap_conf.select_vlan = 0;
+ else if (strcmp(res->l2_encap, "l2_encap-with-vlan") == 0)
+ l2_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ l2_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ l2_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ if (l2_encap_conf.select_vlan)
+ l2_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(l2_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(l2_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_l2_encap = {
+ .f = cmd_set_l2_encap_parsed,
+ .data = NULL,
+ .help_str = "set l2_encap ip-version ipv4|ipv6"
+ " eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_l2_encap_set,
+ (void *)&cmd_set_l2_encap_l2_encap,
+ (void *)&cmd_set_l2_encap_ip_version,
+ (void *)&cmd_set_l2_encap_ip_version_value,
+ (void *)&cmd_set_l2_encap_eth_src,
+ (void *)&cmd_set_l2_encap_eth_src_value,
+ (void *)&cmd_set_l2_encap_eth_dst,
+ (void *)&cmd_set_l2_encap_eth_dst_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_l2_encap_with_vlan = {
+ .f = cmd_set_l2_encap_parsed,
+ .data = NULL,
+ .help_str = "set l2_encap-with-vlan ip-version ipv4|ipv6"
+ " vlan-tci <vlan-tci> eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_l2_encap_set,
+ (void *)&cmd_set_l2_encap_l2_encap_with_vlan,
+ (void *)&cmd_set_l2_encap_ip_version,
+ (void *)&cmd_set_l2_encap_ip_version_value,
+ (void *)&cmd_set_l2_encap_vlan,
+ (void *)&cmd_set_l2_encap_vlan_value,
+ (void *)&cmd_set_l2_encap_eth_src,
+ (void *)&cmd_set_l2_encap_eth_src_value,
+ (void *)&cmd_set_l2_encap_eth_dst,
+ (void *)&cmd_set_l2_encap_eth_dst_value,
+ NULL,
+ },
+};
+
+/** Set L2 decapsulation details */
+struct cmd_set_l2_decap_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t l2_decap;
+ cmdline_fixed_string_t pos_token;
+ uint32_t vlan_present:1;
+};
+
+cmdline_parse_token_string_t cmd_set_l2_decap_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_decap_result, set, "set");
+cmdline_parse_token_string_t cmd_set_l2_decap_l2_decap =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_decap_result, l2_decap,
+ "l2_decap");
+cmdline_parse_token_string_t cmd_set_l2_decap_l2_decap_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_l2_decap_result, l2_decap,
+ "l2_decap-with-vlan");
+
+static void cmd_set_l2_decap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_l2_decap_result *res = parsed_result;
+
+ if (strcmp(res->l2_decap, "l2_decap") == 0)
+ l2_decap_conf.select_vlan = 0;
+ else if (strcmp(res->l2_decap, "l2_decap-with-vlan") == 0)
+ l2_decap_conf.select_vlan = 1;
+}
+
+cmdline_parse_inst_t cmd_set_l2_decap = {
+ .f = cmd_set_l2_decap_parsed,
+ .data = NULL,
+ .help_str = "set l2_decap",
+ .tokens = {
+ (void *)&cmd_set_l2_decap_set,
+ (void *)&cmd_set_l2_decap_l2_decap,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_l2_decap_with_vlan = {
+ .f = cmd_set_l2_decap_parsed,
+ .data = NULL,
+ .help_str = "set l2_decap-with-vlan",
+ .tokens = {
+ (void *)&cmd_set_l2_decap_set,
+ (void *)&cmd_set_l2_decap_l2_decap_with_vlan,
+ NULL,
+ },
+};
+
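
All of the l2/mplsogre/mplsoudp commands in this file feed the generic
RAW_ENCAP/RAW_DECAP flow actions: the parser builds a raw header blob and
hands rte_flow a pointer plus a size. Sketch of the resulting action, with
header_buf/header_len standing in for the blob the parsers assemble:

	struct rte_flow_action_raw_encap raw = {
		.data = header_buf,	/* pre-built Ethernet (+ VLAN) bytes */
		.preserve = NULL,	/* no per-byte preserve mask */
		.size = header_len,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &raw },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
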
+/** Set MPLSoGRE encapsulation details */
+struct cmd_set_mplsogre_encap_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t mplsogre;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t vlan_present:1;
+ uint32_t label;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t tci;
+ struct ether_addr eth_src;
+ struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, set,
+ "set");
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_mplsogre_encap =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, mplsogre,
+ "mplsogre_encap");
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_mplsogre_encap_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ mplsogre, "mplsogre_encap-with-vlan");
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ pos_token, "ip-version");
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ ip_version, "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_label =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ pos_token, "label");
+cmdline_parse_token_num_t cmd_set_mplsogre_encap_label_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsogre_encap_result, label,
+ UINT32);
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ pos_token, "ip-src");
+cmdline_parse_token_ipaddr_t cmd_set_mplsogre_encap_ip_src_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result, ip_src);
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ pos_token, "ip-dst");
+cmdline_parse_token_ipaddr_t cmd_set_mplsogre_encap_ip_dst_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result, ip_dst);
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ pos_token, "vlan-tci");
+cmdline_parse_token_num_t cmd_set_mplsogre_encap_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsogre_encap_result, tci,
+ UINT16);
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ pos_token, "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_mplsogre_encap_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ eth_src);
+cmdline_parse_token_string_t cmd_set_mplsogre_encap_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ pos_token, "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_mplsogre_encap_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result,
+ eth_dst);
+
+static void cmd_set_mplsogre_encap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_mplsogre_encap_result *res = parsed_result;
+ union {
+ uint32_t mplsogre_label;
+ uint8_t label[3];
+ } id = {
+ .mplsogre_label =
+ rte_cpu_to_be_32(res->label) & RTE_BE32(0x00ffffff),
+ };
+
+ if (strcmp(res->mplsogre, "mplsogre_encap") == 0)
+ mplsogre_encap_conf.select_vlan = 0;
+ else if (strcmp(res->mplsogre, "mplsogre_encap-with-vlan") == 0)
+ mplsogre_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ mplsogre_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ mplsogre_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ rte_memcpy(mplsogre_encap_conf.label, &id.label[1], 3);
+ if (mplsogre_encap_conf.select_ipv4) {
+ IPV4_ADDR_TO_UINT(res->ip_src, mplsogre_encap_conf.ipv4_src);
+ IPV4_ADDR_TO_UINT(res->ip_dst, mplsogre_encap_conf.ipv4_dst);
+ } else {
+ IPV6_ADDR_TO_ARRAY(res->ip_src, mplsogre_encap_conf.ipv6_src);
+ IPV6_ADDR_TO_ARRAY(res->ip_dst, mplsogre_encap_conf.ipv6_dst);
+ }
+ if (mplsogre_encap_conf.select_vlan)
+ mplsogre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(mplsogre_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(mplsogre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_mplsogre_encap = {
+ .f = cmd_set_mplsogre_encap_parsed,
+ .data = NULL,
+ .help_str = "set mplsogre_encap ip-version ipv4|ipv6 label <label>"
+ " ip-src <ip-src> ip-dst <ip-dst> eth-src <eth-src>"
+ " eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_mplsogre_encap_set,
+ (void *)&cmd_set_mplsogre_encap_mplsogre_encap,
+ (void *)&cmd_set_mplsogre_encap_ip_version,
+ (void *)&cmd_set_mplsogre_encap_ip_version_value,
+ (void *)&cmd_set_mplsogre_encap_label,
+ (void *)&cmd_set_mplsogre_encap_label_value,
+ (void *)&cmd_set_mplsogre_encap_ip_src,
+ (void *)&cmd_set_mplsogre_encap_ip_src_value,
+ (void *)&cmd_set_mplsogre_encap_ip_dst,
+ (void *)&cmd_set_mplsogre_encap_ip_dst_value,
+ (void *)&cmd_set_mplsogre_encap_eth_src,
+ (void *)&cmd_set_mplsogre_encap_eth_src_value,
+ (void *)&cmd_set_mplsogre_encap_eth_dst,
+ (void *)&cmd_set_mplsogre_encap_eth_dst_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_mplsogre_encap_with_vlan = {
+ .f = cmd_set_mplsogre_encap_parsed,
+ .data = NULL,
+ .help_str = "set mplsogre_encap-with-vlan ip-version ipv4|ipv6"
+ " label <label> ip-src <ip-src> ip-dst <ip-dst>"
+ " vlan-tci <vlan-tci> eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_mplsogre_encap_set,
+ (void *)&cmd_set_mplsogre_encap_mplsogre_encap_with_vlan,
+ (void *)&cmd_set_mplsogre_encap_ip_version,
+ (void *)&cmd_set_mplsogre_encap_ip_version_value,
+ (void *)&cmd_set_mplsogre_encap_label,
+ (void *)&cmd_set_mplsogre_encap_label_value,
+ (void *)&cmd_set_mplsogre_encap_ip_src,
+ (void *)&cmd_set_mplsogre_encap_ip_src_value,
+ (void *)&cmd_set_mplsogre_encap_ip_dst,
+ (void *)&cmd_set_mplsogre_encap_ip_dst_value,
+ (void *)&cmd_set_mplsogre_encap_vlan,
+ (void *)&cmd_set_mplsogre_encap_vlan_value,
+ (void *)&cmd_set_mplsogre_encap_eth_src,
+ (void *)&cmd_set_mplsogre_encap_eth_src_value,
+ (void *)&cmd_set_mplsogre_encap_eth_dst,
+ (void *)&cmd_set_mplsogre_encap_eth_dst_value,
+ NULL,
+ },
+};
+
+/** Set MPLSoGRE decapsulation details */
+struct cmd_set_mplsogre_decap_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t mplsogre;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t vlan_present:1;
+};
+
+cmdline_parse_token_string_t cmd_set_mplsogre_decap_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result, set,
+ "set");
+cmdline_parse_token_string_t cmd_set_mplsogre_decap_mplsogre_decap =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result, mplsogre,
+ "mplsogre_decap");
+cmdline_parse_token_string_t cmd_set_mplsogre_decap_mplsogre_decap_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result,
+ mplsogre, "mplsogre_decap-with-vlan");
+cmdline_parse_token_string_t cmd_set_mplsogre_decap_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result,
+ pos_token, "ip-version");
+cmdline_parse_token_string_t cmd_set_mplsogre_decap_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result,
+ ip_version, "ipv4#ipv6");
+
+static void cmd_set_mplsogre_decap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_mplsogre_decap_result *res = parsed_result;
+
+ if (strcmp(res->mplsogre, "mplsogre_decap") == 0)
+ mplsogre_decap_conf.select_vlan = 0;
+ else if (strcmp(res->mplsogre, "mplsogre_decap-with-vlan") == 0)
+ mplsogre_decap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ mplsogre_decap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ mplsogre_decap_conf.select_ipv4 = 0;
+}
+
+cmdline_parse_inst_t cmd_set_mplsogre_decap = {
+ .f = cmd_set_mplsogre_decap_parsed,
+ .data = NULL,
+ .help_str = "set mplsogre_decap ip-version ipv4|ipv6",
+ .tokens = {
+ (void *)&cmd_set_mplsogre_decap_set,
+ (void *)&cmd_set_mplsogre_decap_mplsogre_decap,
+ (void *)&cmd_set_mplsogre_decap_ip_version,
+ (void *)&cmd_set_mplsogre_decap_ip_version_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_mplsogre_decap_with_vlan = {
+ .f = cmd_set_mplsogre_decap_parsed,
+ .data = NULL,
+ .help_str = "set mplsogre_decap-with-vlan ip-version ipv4|ipv6",
+ .tokens = {
+ (void *)&cmd_set_mplsogre_decap_set,
+ (void *)&cmd_set_mplsogre_decap_mplsogre_decap_with_vlan,
+ (void *)&cmd_set_mplsogre_decap_ip_version,
+ (void *)&cmd_set_mplsogre_decap_ip_version_value,
+ NULL,
+ },
+};
+
+/** Set MPLSoUDP encapsulation details */
+struct cmd_set_mplsoudp_encap_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t mplsoudp;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t vlan_present:1;
+ uint32_t label;
+ uint16_t udp_src;
+ uint16_t udp_dst;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t tci;
+ struct ether_addr eth_src;
+ struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, set,
+ "set");
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_mplsoudp_encap =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, mplsoudp,
+ "mplsoudp_encap");
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_mplsoudp_encap_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ mplsoudp, "mplsoudp_encap-with-vlan");
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "ip-version");
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ ip_version, "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_label =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "label");
+cmdline_parse_token_num_t cmd_set_mplsoudp_encap_label_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, label,
+ UINT32);
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_udp_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "udp-src");
+cmdline_parse_token_num_t cmd_set_mplsoudp_encap_udp_src_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, udp_src,
+ UINT16);
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_udp_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "udp-dst");
+cmdline_parse_token_num_t cmd_set_mplsoudp_encap_udp_dst_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, udp_dst,
+ UINT16);
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "ip-src");
+cmdline_parse_token_ipaddr_t cmd_set_mplsoudp_encap_ip_src_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result, ip_src);
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "ip-dst");
+cmdline_parse_token_ipaddr_t cmd_set_mplsoudp_encap_ip_dst_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result, ip_dst);
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "vlan-tci");
+cmdline_parse_token_num_t cmd_set_mplsoudp_encap_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, tci,
+ UINT16);
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_mplsoudp_encap_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ eth_src);
+cmdline_parse_token_string_t cmd_set_mplsoudp_encap_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ pos_token, "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_mplsoudp_encap_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result,
+ eth_dst);
+
+static void cmd_set_mplsoudp_encap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_mplsoudp_encap_result *res = parsed_result;
+ union {
+ uint32_t mplsoudp_label;
+ uint8_t label[3];
+ } id = {
+ .mplsoudp_label =
+ rte_cpu_to_be_32(res->label) & RTE_BE32(0x00ffffff),
+ };
+
+ if (strcmp(res->mplsoudp, "mplsoudp_encap") == 0)
+ mplsoudp_encap_conf.select_vlan = 0;
+ else if (strcmp(res->mplsoudp, "mplsoudp_encap-with-vlan") == 0)
+ mplsoudp_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ mplsoudp_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ mplsoudp_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ rte_memcpy(mplsoudp_encap_conf.label, &id.label[1], 3);
+ mplsoudp_encap_conf.udp_src = rte_cpu_to_be_16(res->udp_src);
+ mplsoudp_encap_conf.udp_dst = rte_cpu_to_be_16(res->udp_dst);
+ if (mplsoudp_encap_conf.select_ipv4) {
+ IPV4_ADDR_TO_UINT(res->ip_src, mplsoudp_encap_conf.ipv4_src);
+ IPV4_ADDR_TO_UINT(res->ip_dst, mplsoudp_encap_conf.ipv4_dst);
+ } else {
+ IPV6_ADDR_TO_ARRAY(res->ip_src, mplsoudp_encap_conf.ipv6_src);
+ IPV6_ADDR_TO_ARRAY(res->ip_dst, mplsoudp_encap_conf.ipv6_dst);
+ }
+ if (mplsoudp_encap_conf.select_vlan)
+ mplsoudp_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(mplsoudp_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(mplsoudp_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_mplsoudp_encap = {
+ .f = cmd_set_mplsoudp_encap_parsed,
+ .data = NULL,
+ .help_str = "set mplsoudp_encap ip-version ipv4|ipv6 label <label>"
+ " udp-src <udp-src> udp-dst <udp-dst> ip-src <ip-src>"
+ " ip-dst <ip-dst> eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_mplsoudp_encap_set,
+ (void *)&cmd_set_mplsoudp_encap_mplsoudp_encap,
+ (void *)&cmd_set_mplsoudp_encap_ip_version,
+ (void *)&cmd_set_mplsoudp_encap_ip_version_value,
+ (void *)&cmd_set_mplsoudp_encap_label,
+ (void *)&cmd_set_mplsoudp_encap_label_value,
+ (void *)&cmd_set_mplsoudp_encap_udp_src,
+ (void *)&cmd_set_mplsoudp_encap_udp_src_value,
+ (void *)&cmd_set_mplsoudp_encap_udp_dst,
+ (void *)&cmd_set_mplsoudp_encap_udp_dst_value,
+ (void *)&cmd_set_mplsoudp_encap_ip_src,
+ (void *)&cmd_set_mplsoudp_encap_ip_src_value,
+ (void *)&cmd_set_mplsoudp_encap_ip_dst,
+ (void *)&cmd_set_mplsoudp_encap_ip_dst_value,
+ (void *)&cmd_set_mplsoudp_encap_eth_src,
+ (void *)&cmd_set_mplsoudp_encap_eth_src_value,
+ (void *)&cmd_set_mplsoudp_encap_eth_dst,
+ (void *)&cmd_set_mplsoudp_encap_eth_dst_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_mplsoudp_encap_with_vlan = {
+ .f = cmd_set_mplsoudp_encap_parsed,
+ .data = NULL,
+ .help_str = "set mplsoudp_encap-with-vlan ip-version ipv4|ipv6"
+ " label <label> udp-src <udp-src> udp-dst <udp-dst>"
+ " ip-src <ip-src> ip-dst <ip-dst> vlan-tci <vlan-tci>"
+ " eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_mplsoudp_encap_set,
+ (void *)&cmd_set_mplsoudp_encap_mplsoudp_encap_with_vlan,
+ (void *)&cmd_set_mplsoudp_encap_ip_version,
+ (void *)&cmd_set_mplsoudp_encap_ip_version_value,
+ (void *)&cmd_set_mplsoudp_encap_label,
+ (void *)&cmd_set_mplsoudp_encap_label_value,
+ (void *)&cmd_set_mplsoudp_encap_udp_src,
+ (void *)&cmd_set_mplsoudp_encap_udp_src_value,
+ (void *)&cmd_set_mplsoudp_encap_udp_dst,
+ (void *)&cmd_set_mplsoudp_encap_udp_dst_value,
+ (void *)&cmd_set_mplsoudp_encap_ip_src,
+ (void *)&cmd_set_mplsoudp_encap_ip_src_value,
+ (void *)&cmd_set_mplsoudp_encap_ip_dst,
+ (void *)&cmd_set_mplsoudp_encap_ip_dst_value,
+ (void *)&cmd_set_mplsoudp_encap_vlan,
+ (void *)&cmd_set_mplsoudp_encap_vlan_value,
+ (void *)&cmd_set_mplsoudp_encap_eth_src,
+ (void *)&cmd_set_mplsoudp_encap_eth_src_value,
+ (void *)&cmd_set_mplsoudp_encap_eth_dst,
+ (void *)&cmd_set_mplsoudp_encap_eth_dst_value,
+ NULL,
+ },
+};
+
+/** Set MPLSoUDP decapsulation details */
+struct cmd_set_mplsoudp_decap_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t mplsoudp;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t vlan_present:1;
+};
+
+cmdline_parse_token_string_t cmd_set_mplsoudp_decap_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result, set,
+ "set");
+cmdline_parse_token_string_t cmd_set_mplsoudp_decap_mplsoudp_decap =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result, mplsoudp,
+ "mplsoudp_decap");
+cmdline_parse_token_string_t cmd_set_mplsoudp_decap_mplsoudp_decap_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result,
+ mplsoudp, "mplsoudp_decap-with-vlan");
+cmdline_parse_token_string_t cmd_set_mplsoudp_decap_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result,
+ pos_token, "ip-version");
+cmdline_parse_token_string_t cmd_set_mplsoudp_decap_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result,
+ ip_version, "ipv4#ipv6");
+
+static void cmd_set_mplsoudp_decap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_mplsoudp_decap_result *res = parsed_result;
+
+ if (strcmp(res->mplsoudp, "mplsoudp_decap") == 0)
+ mplsoudp_decap_conf.select_vlan = 0;
+ else if (strcmp(res->mplsoudp, "mplsoudp_decap-with-vlan") == 0)
+ mplsoudp_decap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ mplsoudp_decap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ mplsoudp_decap_conf.select_ipv4 = 0;
+}
+
+cmdline_parse_inst_t cmd_set_mplsoudp_decap = {
+ .f = cmd_set_mplsoudp_decap_parsed,
+ .data = NULL,
+ .help_str = "set mplsoudp_decap ip-version ipv4|ipv6",
+ .tokens = {
+ (void *)&cmd_set_mplsoudp_decap_set,
+ (void *)&cmd_set_mplsoudp_decap_mplsoudp_decap,
+ (void *)&cmd_set_mplsoudp_decap_ip_version,
+ (void *)&cmd_set_mplsoudp_decap_ip_version_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_mplsoudp_decap_with_vlan = {
+ .f = cmd_set_mplsoudp_decap_parsed,
+ .data = NULL,
+ .help_str = "set mplsoudp_decap-with-vlan ip-version ipv4|ipv6",
+ .tokens = {
+ (void *)&cmd_set_mplsoudp_decap_set,
+ (void *)&cmd_set_mplsoudp_decap_mplsoudp_decap_with_vlan,
+ (void *)&cmd_set_mplsoudp_decap_ip_version,
+ (void *)&cmd_set_mplsoudp_decap_ip_version_value,
+ NULL,
+ },
+};
+
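
The parsed callbacks above pack the 20-bit MPLS label into three
network-order bytes via a small union; a worked version of the same trick:

	uint32_t label = 0x12345;		/* 20-bit MPLS label */
	union {
		uint32_t be;
		uint8_t bytes[4];
	} id = {
		.be = rte_cpu_to_be_32(label) & RTE_BE32(0x00ffffff),
	};
	uint8_t mpls_label[3];

	rte_memcpy(mpls_label, &id.bytes[1], 3);	/* bytes 1..3 hold it */
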
/* Strict link priority scheduling mode setting */
static void
cmd_strict_link_prio_parsed(
@@ -17403,7 +18180,8 @@ cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_offload =
"sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#"
"qinq_insert#vxlan_tnl_tso#gre_tnl_tso#"
"ipip_tnl_tso#geneve_tnl_tso#macsec_insert#"
- "mt_lockfree#multi_segs#mbuf_fast_free#security");
+ "mt_lockfree#multi_segs#mbuf_fast_free#security#"
+ "match_metadata");
cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_config_per_port_tx_offload_result,
@@ -17484,8 +18262,8 @@ cmdline_parse_inst_t cmd_config_per_port_tx_offload = {
"sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|"
"qinq_insert|vxlan_tnl_tso|gre_tnl_tso|"
"ipip_tnl_tso|geneve_tnl_tso|macsec_insert|"
- "mt_lockfree|multi_segs|mbuf_fast_free|security "
- "on|off",
+ "mt_lockfree|multi_segs|mbuf_fast_free|security|"
+ "match_metadata on|off",
.tokens = {
(void *)&cmd_config_per_port_tx_offload_result_port,
(void *)&cmd_config_per_port_tx_offload_result_config,
@@ -17602,6 +18380,113 @@ cmdline_parse_inst_t cmd_config_per_queue_tx_offload = {
}
};
+/* *** configure tx_metadata for specific port *** */
+struct cmd_config_tx_metadata_specific_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t keyword;
+ uint16_t port_id;
+ cmdline_fixed_string_t item;
+ uint32_t value;
+};
+
+static void
+cmd_config_tx_metadata_specific_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_tx_metadata_specific_result *res = parsed_result;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+ ports[res->port_id].tx_metadata = rte_cpu_to_be_32(res->value);
+ /* Add/remove callback to insert valid metadata in every Tx packet. */
+ if (ports[res->port_id].tx_metadata)
+ add_tx_md_callback(res->port_id);
+ else
+ remove_tx_md_callback(res->port_id);
+}
+
+cmdline_parse_token_string_t cmd_config_tx_metadata_specific_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tx_metadata_specific_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_config_tx_metadata_specific_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tx_metadata_specific_result,
+ keyword, "config");
+cmdline_parse_token_num_t cmd_config_tx_metadata_specific_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_tx_metadata_specific_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_tx_metadata_specific_item =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tx_metadata_specific_result,
+ item, "tx_metadata");
+cmdline_parse_token_num_t cmd_config_tx_metadata_specific_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_tx_metadata_specific_result,
+ value, UINT32);
+
+cmdline_parse_inst_t cmd_config_tx_metadata_specific = {
+ .f = cmd_config_tx_metadata_specific_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> tx_metadata <value>",
+ .tokens = {
+ (void *)&cmd_config_tx_metadata_specific_port,
+ (void *)&cmd_config_tx_metadata_specific_keyword,
+ (void *)&cmd_config_tx_metadata_specific_id,
+ (void *)&cmd_config_tx_metadata_specific_item,
+ (void *)&cmd_config_tx_metadata_specific_value,
+ NULL,
+ },
+};
+
+/* *** display tx_metadata per port configuration *** */
+struct cmd_show_tx_metadata_result {
+ cmdline_fixed_string_t cmd_show;
+ cmdline_fixed_string_t cmd_port;
+ cmdline_fixed_string_t cmd_keyword;
+ portid_t cmd_pid;
+};
+
+static void
+cmd_show_tx_metadata_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_tx_metadata_result *res = parsed_result;
+
+ if (!rte_eth_dev_is_valid_port(res->cmd_pid)) {
+ printf("invalid port id %u\n", res->cmd_pid);
+ return;
+ }
+ if (!strcmp(res->cmd_keyword, "tx_metadata")) {
+		printf("Port %u tx_metadata: %u\n", res->cmd_pid,
+		       rte_be_to_cpu_32(ports[res->cmd_pid].tx_metadata));
+ }
+}
+
+cmdline_parse_token_string_t cmd_show_tx_metadata_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_tx_metadata_result,
+ cmd_show, "show");
+cmdline_parse_token_string_t cmd_show_tx_metadata_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_tx_metadata_result,
+ cmd_port, "port");
+cmdline_parse_token_num_t cmd_show_tx_metadata_pid =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_tx_metadata_result,
+ cmd_pid, UINT16);
+cmdline_parse_token_string_t cmd_show_tx_metadata_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_tx_metadata_result,
+ cmd_keyword, "tx_metadata");
+
+cmdline_parse_inst_t cmd_show_tx_metadata = {
+ .f = cmd_show_tx_metadata_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> tx_metadata",
+ .tokens = {
+ (void *)&cmd_show_tx_metadata_show,
+ (void *)&cmd_show_tx_metadata_port,
+ (void *)&cmd_show_tx_metadata_pid,
+ (void *)&cmd_show_tx_metadata_keyword,
+ NULL,
+ },
+};
+
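
The callback added/removed above stamps every outgoing packet; a hedged
sketch of the per-mbuf operation using the 18.11-era mbuf fields:

	static inline void
	stamp_tx_metadata(struct rte_mbuf *m, uint32_t metadata)
	{
		m->tx_metadata = metadata;	/* byte-swapped at config time */
		m->ol_flags |= PKT_TX_METADATA;
	}
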
/* ******************************************************************************** */
/* list of instructions */
@@ -17700,6 +18585,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_operate_specific_port,
(cmdline_parse_inst_t *)&cmd_operate_attach_port,
(cmdline_parse_inst_t *)&cmd_operate_detach_port,
+ (cmdline_parse_inst_t *)&cmd_set_port_setup_on,
(cmdline_parse_inst_t *)&cmd_config_speed_all,
(cmdline_parse_inst_t *)&cmd_config_speed_specific,
(cmdline_parse_inst_t *)&cmd_config_loopback_all,
@@ -17711,6 +18597,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_config_rss,
(cmdline_parse_inst_t *)&cmd_config_rxtx_ring_size,
(cmdline_parse_inst_t *)&cmd_config_rxtx_queue,
+ (cmdline_parse_inst_t *)&cmd_config_deferred_start_rxtx_queue,
(cmdline_parse_inst_t *)&cmd_setup_rxtx_queue,
(cmdline_parse_inst_t *)&cmd_config_rss_reta,
(cmdline_parse_inst_t *)&cmd_showport_reta,
@@ -17814,6 +18701,18 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_vxlan_with_vlan,
(cmdline_parse_inst_t *)&cmd_set_nvgre,
(cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_l2_encap,
+ (cmdline_parse_inst_t *)&cmd_set_l2_encap_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_l2_decap,
+ (cmdline_parse_inst_t *)&cmd_set_l2_decap_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap,
+ (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap,
+ (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap,
+ (cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap,
+ (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap_with_vlan,
(cmdline_parse_inst_t *)&cmd_ddp_add,
(cmdline_parse_inst_t *)&cmd_ddp_del,
(cmdline_parse_inst_t *)&cmd_ddp_get_list,
@@ -17854,6 +18753,9 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_suspend_port_tm_node,
(cmdline_parse_inst_t *)&cmd_resume_port_tm_node,
(cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit,
+ (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_ecn,
+ (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_dscp,
+ (cmdline_parse_inst_t *)&cmd_port_tm_mark_vlan_dei,
(cmdline_parse_inst_t *)&cmd_cfg_tunnel_udp_port,
(cmdline_parse_inst_t *)&cmd_rx_offload_get_capa,
(cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration,
@@ -17867,6 +18769,8 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_operate_bpf_ld_parse,
(cmdline_parse_inst_t *)&cmd_operate_bpf_unld_parse,
#endif
+ (cmdline_parse_inst_t *)&cmd_config_tx_metadata_specific,
+ (cmdline_parse_inst_t *)&cmd_show_tx_metadata,
NULL,
};
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index f9260600..23ea7cc8 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -178,6 +178,8 @@ enum index {
ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
ITEM_ICMP6_ND_OPT_TLA_ETH,
ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
+ ITEM_META,
+ ITEM_META_DATA,
/* Validate/create actions. */
ACTIONS,
@@ -243,6 +245,32 @@ enum index {
ACTION_VXLAN_DECAP,
ACTION_NVGRE_ENCAP,
ACTION_NVGRE_DECAP,
+ ACTION_L2_ENCAP,
+ ACTION_L2_DECAP,
+ ACTION_MPLSOGRE_ENCAP,
+ ACTION_MPLSOGRE_DECAP,
+ ACTION_MPLSOUDP_ENCAP,
+ ACTION_MPLSOUDP_DECAP,
+ ACTION_SET_IPV4_SRC,
+ ACTION_SET_IPV4_SRC_IPV4_SRC,
+ ACTION_SET_IPV4_DST,
+ ACTION_SET_IPV4_DST_IPV4_DST,
+ ACTION_SET_IPV6_SRC,
+ ACTION_SET_IPV6_SRC_IPV6_SRC,
+ ACTION_SET_IPV6_DST,
+ ACTION_SET_IPV6_DST_IPV6_DST,
+ ACTION_SET_TP_SRC,
+ ACTION_SET_TP_SRC_TP_SRC,
+ ACTION_SET_TP_DST,
+ ACTION_SET_TP_DST_TP_DST,
+ ACTION_MAC_SWAP,
+ ACTION_DEC_TTL,
+ ACTION_SET_TTL,
+ ACTION_SET_TTL_TTL,
+ ACTION_SET_MAC_SRC,
+ ACTION_SET_MAC_SRC_MAC_SRC,
+ ACTION_SET_MAC_DST,
+ ACTION_SET_MAC_DST_MAC_DST,
};
/** Maximum size for pattern in struct rte_flow_item_raw. */
@@ -295,6 +323,22 @@ struct action_nvgre_encap_data {
struct rte_flow_item_nvgre item_nvgre;
};
+/** Maximum data size in struct rte_flow_action_raw_encap. */
+#define ACTION_RAW_ENCAP_MAX_DATA 128
+
+/** Storage for struct rte_flow_action_raw_encap including external data. */
+struct action_raw_encap_data {
+ struct rte_flow_action_raw_encap conf;
+ uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
+ uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
+};
+
+/** Storage for struct rte_flow_action_raw_decap including external data. */
+struct action_raw_decap_data {
+ struct rte_flow_action_raw_decap conf;
+ uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
+};
+
/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
@@ -564,6 +608,7 @@ static const enum index next_item[] = {
ITEM_ICMP6_ND_OPT,
ITEM_ICMP6_ND_OPT_SLA_ETH,
ITEM_ICMP6_ND_OPT_TLA_ETH,
+ ITEM_META,
ZERO,
};
@@ -784,6 +829,12 @@ static const enum index item_icmp6_nd_opt_tla_eth[] = {
ZERO,
};
+static const enum index item_meta[] = {
+ ITEM_META_DATA,
+ ITEM_NEXT,
+ ZERO,
+};
+
static const enum index next_action[] = {
ACTION_END,
ACTION_VOID,
@@ -816,6 +867,23 @@ static const enum index next_action[] = {
ACTION_VXLAN_DECAP,
ACTION_NVGRE_ENCAP,
ACTION_NVGRE_DECAP,
+ ACTION_L2_ENCAP,
+ ACTION_L2_DECAP,
+ ACTION_MPLSOGRE_ENCAP,
+ ACTION_MPLSOGRE_DECAP,
+ ACTION_MPLSOUDP_ENCAP,
+ ACTION_MPLSOUDP_DECAP,
+ ACTION_SET_IPV4_SRC,
+ ACTION_SET_IPV4_DST,
+ ACTION_SET_IPV6_SRC,
+ ACTION_SET_IPV6_DST,
+ ACTION_SET_TP_SRC,
+ ACTION_SET_TP_DST,
+ ACTION_MAC_SWAP,
+ ACTION_DEC_TTL,
+ ACTION_SET_TTL,
+ ACTION_SET_MAC_SRC,
+ ACTION_SET_MAC_DST,
ZERO,
};
@@ -918,12 +986,66 @@ static const enum index action_of_push_mpls[] = {
ZERO,
};
+static const enum index action_set_ipv4_src[] = {
+ ACTION_SET_IPV4_SRC_IPV4_SRC,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_set_mac_src[] = {
+ ACTION_SET_MAC_SRC_MAC_SRC,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_set_ipv4_dst[] = {
+ ACTION_SET_IPV4_DST_IPV4_DST,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_set_ipv6_src[] = {
+ ACTION_SET_IPV6_SRC_IPV6_SRC,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_set_ipv6_dst[] = {
+ ACTION_SET_IPV6_DST_IPV6_DST,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_set_tp_src[] = {
+ ACTION_SET_TP_SRC_TP_SRC,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_set_tp_dst[] = {
+ ACTION_SET_TP_DST_TP_DST,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_set_ttl[] = {
+ ACTION_SET_TTL_TTL,
+ ACTION_NEXT,
+ ZERO,
+};
+
static const enum index action_jump[] = {
ACTION_JUMP_GROUP,
ACTION_NEXT,
ZERO,
};
+static const enum index action_set_mac_dst[] = {
+ ACTION_SET_MAC_DST_MAC_DST,
+ ACTION_NEXT,
+ ZERO,
+};
+
static int parse_init(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -952,6 +1074,24 @@ static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
const char *, unsigned int, void *,
unsigned int);
+static int parse_vc_action_l2_encap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_l2_decap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_mplsogre_encap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
+static int parse_vc_action_mplsogre_decap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
+static int parse_vc_action_mplsoudp_encap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
+static int parse_vc_action_mplsoudp_decap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
static int parse_destroy(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -1985,6 +2125,20 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON
(struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
},
+ [ITEM_META] = {
+ .name = "meta",
+ .help = "match metadata header",
+ .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
+ .next = NEXT(item_meta),
+ .call = parse_vc,
+ },
+ [ITEM_META_DATA] = {
+ .name = "data",
+ .help = "metadata value",
+ .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
+ data, "\xff\xff\xff\xff")),
+ },
/* Validate/create actions. */
[ACTIONS] = {
@@ -2470,6 +2624,225 @@ static const struct token token_list[] = {
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc,
},
+ [ACTION_L2_ENCAP] = {
+ .name = "l2_encap",
+ .help = "l2 encap, uses configuration set by"
+ " \"set l2_encap\"",
+ .priv = PRIV_ACTION(RAW_ENCAP,
+ sizeof(struct action_raw_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_l2_encap,
+ },
+ [ACTION_L2_DECAP] = {
+ .name = "l2_decap",
+ .help = "l2 decap, uses configuration set by"
+ " \"set l2_decap\"",
+ .priv = PRIV_ACTION(RAW_DECAP,
+ sizeof(struct action_raw_decap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_l2_decap,
+ },
+ [ACTION_MPLSOGRE_ENCAP] = {
+ .name = "mplsogre_encap",
+ .help = "mplsogre encapsulation, uses configuration set by"
+ " \"set mplsogre_encap\"",
+ .priv = PRIV_ACTION(RAW_ENCAP,
+ sizeof(struct action_raw_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsogre_encap,
+ },
+ [ACTION_MPLSOGRE_DECAP] = {
+ .name = "mplsogre_decap",
+ .help = "mplsogre decapsulation, uses configuration set by"
+ " \"set mplsogre_decap\"",
+ .priv = PRIV_ACTION(RAW_DECAP,
+ sizeof(struct action_raw_decap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsogre_decap,
+ },
+ [ACTION_MPLSOUDP_ENCAP] = {
+ .name = "mplsoudp_encap",
+ .help = "mplsoudp encapsulation, uses configuration set by"
+ " \"set mplsoudp_encap\"",
+ .priv = PRIV_ACTION(RAW_ENCAP,
+ sizeof(struct action_raw_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsoudp_encap,
+ },
+ [ACTION_MPLSOUDP_DECAP] = {
+ .name = "mplsoudp_decap",
+ .help = "mplsoudp decapsulation, uses configuration set by"
+ " \"set mplsoudp_decap\"",
+ .priv = PRIV_ACTION(RAW_DECAP,
+ sizeof(struct action_raw_decap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsoudp_decap,
+ },
+ [ACTION_SET_IPV4_SRC] = {
+ .name = "set_ipv4_src",
+ .help = "Set a new IPv4 source address in the outermost"
+ " IPv4 header",
+ .priv = PRIV_ACTION(SET_IPV4_SRC,
+ sizeof(struct rte_flow_action_set_ipv4)),
+ .next = NEXT(action_set_ipv4_src),
+ .call = parse_vc,
+ },
+ [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
+ .name = "ipv4_addr",
+ .help = "new IPv4 source address to set",
+ .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_ipv4, ipv4_addr)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_SET_IPV4_DST] = {
+ .name = "set_ipv4_dst",
+ .help = "Set a new IPv4 destination address in the outermost"
+ " IPv4 header",
+ .priv = PRIV_ACTION(SET_IPV4_DST,
+ sizeof(struct rte_flow_action_set_ipv4)),
+ .next = NEXT(action_set_ipv4_dst),
+ .call = parse_vc,
+ },
+ [ACTION_SET_IPV4_DST_IPV4_DST] = {
+ .name = "ipv4_addr",
+ .help = "new IPv4 destination address to set",
+ .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_ipv4, ipv4_addr)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_SET_IPV6_SRC] = {
+ .name = "set_ipv6_src",
+ .help = "Set a new IPv6 source address in the outermost"
+ " IPv6 header",
+ .priv = PRIV_ACTION(SET_IPV6_SRC,
+ sizeof(struct rte_flow_action_set_ipv6)),
+ .next = NEXT(action_set_ipv6_src),
+ .call = parse_vc,
+ },
+ [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
+ .name = "ipv6_addr",
+ .help = "new IPv6 source address to set",
+ .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_ipv6, ipv6_addr)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_SET_IPV6_DST] = {
+ .name = "set_ipv6_dst",
+ .help = "Set a new IPv6 destination address in the outermost"
+ " IPv6 header",
+ .priv = PRIV_ACTION(SET_IPV6_DST,
+ sizeof(struct rte_flow_action_set_ipv6)),
+ .next = NEXT(action_set_ipv6_dst),
+ .call = parse_vc,
+ },
+ [ACTION_SET_IPV6_DST_IPV6_DST] = {
+ .name = "ipv6_addr",
+ .help = "new IPv6 destination address to set",
+ .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_ipv6, ipv6_addr)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_SET_TP_SRC] = {
+ .name = "set_tp_src",
+ .help = "set a new source port number in the outermost"
+ " TCP/UDP header",
+ .priv = PRIV_ACTION(SET_TP_SRC,
+ sizeof(struct rte_flow_action_set_tp)),
+ .next = NEXT(action_set_tp_src),
+ .call = parse_vc,
+ },
+ [ACTION_SET_TP_SRC_TP_SRC] = {
+ .name = "port",
+ .help = "new source port number to set",
+ .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_tp, port)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_SET_TP_DST] = {
+ .name = "set_tp_dst",
+ .help = "set a new destination port number in the outermost"
+ " TCP/UDP header",
+ .priv = PRIV_ACTION(SET_TP_DST,
+ sizeof(struct rte_flow_action_set_tp)),
+ .next = NEXT(action_set_tp_dst),
+ .call = parse_vc,
+ },
+ [ACTION_SET_TP_DST_TP_DST] = {
+ .name = "port",
+ .help = "new destination port number to set",
+ .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_tp, port)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_MAC_SWAP] = {
+ .name = "mac_swap",
+ .help = "Swap the source and destination MAC addresses"
+ " in the outermost Ethernet header",
+ .priv = PRIV_ACTION(MAC_SWAP, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_DEC_TTL] = {
+ .name = "dec_ttl",
+ .help = "decrease network TTL if available",
+ .priv = PRIV_ACTION(DEC_TTL, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_SET_TTL] = {
+ .name = "set_ttl",
+ .help = "set ttl value",
+ .priv = PRIV_ACTION(SET_TTL,
+ sizeof(struct rte_flow_action_set_ttl)),
+ .next = NEXT(action_set_ttl),
+ .call = parse_vc,
+ },
+ [ACTION_SET_TTL_TTL] = {
+ .name = "ttl_value",
+ .help = "new ttl value to set",
+ .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_ttl, ttl_value)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_SET_MAC_SRC] = {
+ .name = "set_mac_src",
+ .help = "set source mac address",
+ .priv = PRIV_ACTION(SET_MAC_SRC,
+ sizeof(struct rte_flow_action_set_mac)),
+ .next = NEXT(action_set_mac_src),
+ .call = parse_vc,
+ },
+ [ACTION_SET_MAC_SRC_MAC_SRC] = {
+ .name = "mac_addr",
+ .help = "new source mac address",
+ .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_mac, mac_addr)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_SET_MAC_DST] = {
+ .name = "set_mac_dst",
+ .help = "set destination mac address",
+ .priv = PRIV_ACTION(SET_MAC_DST,
+ sizeof(struct rte_flow_action_set_mac)),
+ .next = NEXT(action_set_mac_dst),
+ .call = parse_vc,
+ },
+ [ACTION_SET_MAC_DST_MAC_DST] = {
+ .name = "mac_addr",
+ .help = "new destination mac address to set",
+ .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_set_mac, mac_addr)),
+ .call = parse_vc_conf,
+ },
};
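
For reference, a C equivalent of matching the new META item, assuming the
18.11 definition of struct rte_flow_item_meta with a single big-endian data
field:

	struct rte_flow_item_meta meta_spec = {
		.data = rte_cpu_to_be_32(0xcafe),  /* match tx_metadata 0xcafe */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_META, .spec = &meta_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
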
/** Remove and return last entry from argument stack. */
@@ -3225,6 +3598,503 @@ parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
return ret;
}
+/** Parse l2 encap action. */
+static int
+parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = l2_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ };
+ header = action_encap_data->data;
+ if (l2_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (l2_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ l2_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (l2_encap_conf.select_vlan) {
+ if (l2_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/** Parse l2 decap action. */
+static int
+parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = 0, /* value irrelevant for decap; only the header size matters */
+ .inner_type = 0,
+ };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (l2_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (l2_decap_conf.select_vlan) {
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
+#define ETHER_TYPE_MPLS_UNICAST 0x8847
+
+/** Parse MPLSOGRE encap action. */
+static int
+parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsogre_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .src_addr = mplsogre_encap_conf.ipv4_src,
+ .dst_addr = mplsogre_encap_conf.ipv4_dst,
+ .next_proto_id = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_gre gre = {
+ .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ .preserve = {},
+ };
+ header = action_encap_data->data;
+ if (mplsogre_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsogre_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsogre_encap_conf.select_vlan) {
+ if (mplsogre_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsogre_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(&ipv6.hdr.src_addr,
+ &mplsogre_encap_conf.ipv6_src,
+ sizeof(mplsogre_encap_conf.ipv6_src));
+ memcpy(&ipv6.hdr.dst_addr,
+ &mplsogre_encap_conf.ipv6_dst,
+ sizeof(mplsogre_encap_conf.ipv6_dst));
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &gre, sizeof(gre));
+ header += sizeof(gre);
+ memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
+ RTE_DIM(mplsogre_encap_conf.label));
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/** Parse MPLSOGRE decap action. */
+static int
+parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .next_proto_id = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_gre gre = {
+ .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (mplsogre_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsogre_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsogre_encap_conf.select_vlan) {
+ if (mplsogre_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsogre_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &gre, sizeof(gre));
+ header += sizeof(gre);
+ memset(&mpls, 0, sizeof(mpls));
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
+/** Parse MPLSOUDP encap action. */
+static int
+parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsoudp_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .src_addr = mplsoudp_encap_conf.ipv4_src,
+ .dst_addr = mplsoudp_encap_conf.ipv4_dst,
+ .next_proto_id = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_udp udp = {
+ .hdr = {
+ .src_port = mplsoudp_encap_conf.udp_src,
+ .dst_port = mplsoudp_encap_conf.udp_dst,
+ },
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ .preserve = {},
+ };
+ header = action_encap_data->data;
+ if (mplsoudp_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsoudp_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsoudp_encap_conf.select_vlan) {
+ if (mplsoudp_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsoudp_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(&ipv6.hdr.src_addr,
+ &mplsoudp_encap_conf.ipv6_src,
+ sizeof(mplsoudp_encap_conf.ipv6_src));
+ memcpy(&ipv6.hdr.dst_addr,
+ &mplsoudp_encap_conf.ipv6_dst,
+ sizeof(mplsoudp_encap_conf.ipv6_dst));
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &udp, sizeof(udp));
+ header += sizeof(udp);
+ memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
+ RTE_DIM(mplsoudp_encap_conf.label));
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/** Parse MPLSOUDP decap action. */
+static int
+parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .next_proto_id = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_udp udp = {
+ .hdr = {
+ .dst_port = rte_cpu_to_be_16(6635),
+ },
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (mplsoudp_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsoudp_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsoudp_encap_conf.select_vlan) {
+ if (mplsoudp_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsoudp_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &udp, sizeof(udp));
+ header += sizeof(udp);
+ memset(&mpls, 0, sizeof(mpls));
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
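The raw encap/decap parsers above read their header fields from global configuration filled in by dedicated `set ...` commands, so a typical MPLSoUDP session would look roughly like this (assuming the `set mplsoudp_encap` command introduced alongside these parsers; all values are hypothetical):

    testpmd> set mplsoudp_encap ip-version ipv4 label 4 udp-src 5000 udp-dst 6635 ip-src 10.0.0.1 ip-dst 10.0.0.2 eth-src 11:22:33:44:55:66 eth-dst 66:55:44:33:22:11
    testpmd> flow create 0 egress pattern eth / end actions mplsoudp_encap / end
    testpmd> flow create 0 ingress pattern eth / ipv4 / udp / mpls / end actions mplsoudp_decap / queue index 0 / end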
/** Parse tokens for destroy command. */
static int
parse_destroy(struct context *ctx, const struct token *token,
diff --git a/app/test-pmd/cmdline_mtr.c b/app/test-pmd/cmdline_mtr.c
index f908fb35..32a47305 100644
--- a/app/test-pmd/cmdline_mtr.c
+++ b/app/test-pmd/cmdline_mtr.c
@@ -414,9 +414,9 @@ cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm = {
(void *)&cmd_add_port_meter_profile_srtcm_port,
(void *)&cmd_add_port_meter_profile_srtcm_meter,
(void *)&cmd_add_port_meter_profile_srtcm_profile,
+ (void *)&cmd_add_port_meter_profile_srtcm_srtcm_rfc2697,
(void *)&cmd_add_port_meter_profile_srtcm_port_id,
(void *)&cmd_add_port_meter_profile_srtcm_profile_id,
- (void *)&cmd_add_port_meter_profile_srtcm_srtcm_rfc2697,
(void *)&cmd_add_port_meter_profile_srtcm_cir,
(void *)&cmd_add_port_meter_profile_srtcm_cbs,
(void *)&cmd_add_port_meter_profile_srtcm_ebs,
@@ -521,9 +521,9 @@ cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm = {
(void *)&cmd_add_port_meter_profile_trtcm_port,
(void *)&cmd_add_port_meter_profile_trtcm_meter,
(void *)&cmd_add_port_meter_profile_trtcm_profile,
+ (void *)&cmd_add_port_meter_profile_trtcm_trtcm_rfc2698,
(void *)&cmd_add_port_meter_profile_trtcm_port_id,
(void *)&cmd_add_port_meter_profile_trtcm_profile_id,
- (void *)&cmd_add_port_meter_profile_trtcm_trtcm_rfc2698,
(void *)&cmd_add_port_meter_profile_trtcm_cir,
(void *)&cmd_add_port_meter_profile_trtcm_pir,
(void *)&cmd_add_port_meter_profile_trtcm_cbs,
@@ -633,9 +633,9 @@ cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115 = {
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_meter,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port_id,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile_id,
- (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cir,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_eir,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cbs,
diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c
index 631f1799..b4307974 100644
--- a/app/test-pmd/cmdline_tm.c
+++ b/app/test-pmd/cmdline_tm.c
@@ -2187,3 +2187,263 @@ cmdline_parse_inst_t cmd_port_tm_hierarchy_commit = {
NULL,
},
};
+
+/* *** Port TM Mark IP ECN *** */
+struct cmd_port_tm_mark_ip_ecn_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t mark;
+ cmdline_fixed_string_t ip_ecn;
+ uint16_t port_id;
+ uint16_t green;
+ uint16_t yellow;
+ uint16_t red;
+};
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ set, "set");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ port, "port");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_tm =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, tm,
+ "tm");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_mark =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ mark, "mark");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_ip_ecn =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ ip_ecn, "ip_ecn");
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ port_id, UINT16);
+
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_green =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ green, UINT16);
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_yellow =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ yellow, UINT16);
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_red =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result,
+ red, UINT16);
+
+static void cmd_port_tm_mark_ip_ecn_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_port_tm_mark_ip_ecn_result *res = parsed_result;
+ struct rte_tm_error error;
+ portid_t port_id = res->port_id;
+ int green = res->green;
+ int yellow = res->yellow;
+ int red = res->red;
+ int ret;
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&error, 0, sizeof(struct rte_tm_error));
+ ret = rte_tm_mark_ip_ecn(port_id, green, yellow, red, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_port_tm_mark_ip_ecn = {
+ .f = cmd_port_tm_mark_ip_ecn_parsed,
+ .data = NULL,
+ .help_str = "set port tm mark ip_ecn <port> <green> <yellow> <red>",
+ .tokens = {
+ (void *)&cmd_port_tm_mark_ip_ecn_set,
+ (void *)&cmd_port_tm_mark_ip_ecn_port,
+ (void *)&cmd_port_tm_mark_ip_ecn_tm,
+ (void *)&cmd_port_tm_mark_ip_ecn_mark,
+ (void *)&cmd_port_tm_mark_ip_ecn_ip_ecn,
+ (void *)&cmd_port_tm_mark_ip_ecn_port_id,
+ (void *)&cmd_port_tm_mark_ip_ecn_green,
+ (void *)&cmd_port_tm_mark_ip_ecn_yellow,
+ (void *)&cmd_port_tm_mark_ip_ecn_red,
+ NULL,
+ },
+};
+
+
+/* *** Port TM Mark IP DSCP *** */
+struct cmd_port_tm_mark_ip_dscp_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t mark;
+ cmdline_fixed_string_t ip_dscp;
+ uint16_t port_id;
+ uint16_t green;
+ uint16_t yellow;
+ uint16_t red;
+};
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ set, "set");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ port, "port");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_tm =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, tm,
+ "tm");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_mark =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ mark, "mark");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_ip_dscp =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ ip_dscp, "ip_dscp");
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ port_id, UINT16);
+
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_green =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ green, UINT16);
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_yellow =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ yellow, UINT16);
+cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_red =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result,
+ red, UINT16);
+
+static void cmd_port_tm_mark_ip_dscp_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_port_tm_mark_ip_dscp_result *res = parsed_result;
+ struct rte_tm_error error;
+ portid_t port_id = res->port_id;
+ int green = res->green;
+ int yellow = res->yellow;
+ int red = res->red;
+ int ret;
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&error, 0, sizeof(struct rte_tm_error));
+ ret = rte_tm_mark_ip_dscp(port_id, green, yellow, red, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_port_tm_mark_ip_dscp = {
+ .f = cmd_port_tm_mark_ip_dscp_parsed,
+ .data = NULL,
+ .help_str = "set port tm mark ip_dscp <port> <green> <yellow> <red>",
+ .tokens = {
+ (void *)&cmd_port_tm_mark_ip_dscp_set,
+ (void *)&cmd_port_tm_mark_ip_dscp_port,
+ (void *)&cmd_port_tm_mark_ip_dscp_tm,
+ (void *)&cmd_port_tm_mark_ip_dscp_mark,
+ (void *)&cmd_port_tm_mark_ip_dscp_ip_dscp,
+ (void *)&cmd_port_tm_mark_ip_dscp_port_id,
+ (void *)&cmd_port_tm_mark_ip_dscp_green,
+ (void *)&cmd_port_tm_mark_ip_dscp_yellow,
+ (void *)&cmd_port_tm_mark_ip_dscp_red,
+ NULL,
+ },
+};
+
+
+/* *** Port TM Mark VLAN_DEI *** */
+struct cmd_port_tm_mark_vlan_dei_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t mark;
+ cmdline_fixed_string_t vlan_dei;
+ uint16_t port_id;
+ uint16_t green;
+ uint16_t yellow;
+ uint16_t red;
+};
+
+cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ set, "set");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ port, "port");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_tm =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, tm,
+ "tm");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_mark =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ mark, "mark");
+
+cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_vlan_dei =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ vlan_dei, "vlan_dei");
+cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ port_id, UINT16);
+
+cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_green =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ green, UINT16);
+cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_yellow =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ yellow, UINT16);
+cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_red =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result,
+ red, UINT16);
+
+static void cmd_port_tm_mark_vlan_dei_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_port_tm_mark_vlan_dei_result *res = parsed_result;
+ struct rte_tm_error error;
+ portid_t port_id = res->port_id;
+ int green = res->green;
+ int yellow = res->yellow;
+ int red = res->red;
+ int ret;
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&error, 0, sizeof(struct rte_tm_error));
+ ret = rte_tm_mark_vlan_dei(port_id, green, yellow, red, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_port_tm_mark_vlan_dei = {
+ .f = cmd_port_tm_mark_vlan_dei_parsed,
+ .data = NULL,
+ .help_str = "set port tm mark vlan_dei <port> <green> <yellow> <red>",
+ .tokens = {
+ (void *)&cmd_port_tm_mark_vlan_dei_set,
+ (void *)&cmd_port_tm_mark_vlan_dei_port,
+ (void *)&cmd_port_tm_mark_vlan_dei_tm,
+ (void *)&cmd_port_tm_mark_vlan_dei_mark,
+ (void *)&cmd_port_tm_mark_vlan_dei_vlan_dei,
+ (void *)&cmd_port_tm_mark_vlan_dei_port_id,
+ (void *)&cmd_port_tm_mark_vlan_dei_green,
+ (void *)&cmd_port_tm_mark_vlan_dei_yellow,
+ (void *)&cmd_port_tm_mark_vlan_dei_red,
+ NULL,
+ },
+};
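All three marking commands share the same shape; going by the help strings above, a session enabling marking on port 0 for every color might look like (values illustrative):

    testpmd> set port tm mark ip_ecn 0 1 1 1
    testpmd> set port tm mark ip_dscp 0 1 1 1
    testpmd> set port tm mark vlan_dei 0 1 1 1

The trailing triplet enables (non-zero) or disables (zero) marking for green, yellow and red packets respectively, matching the rte_tm_mark_*() API parameters.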
diff --git a/app/test-pmd/cmdline_tm.h b/app/test-pmd/cmdline_tm.h
index b3a14ade..950cb753 100644
--- a/app/test-pmd/cmdline_tm.h
+++ b/app/test-pmd/cmdline_tm.h
@@ -25,5 +25,8 @@ extern cmdline_parse_inst_t cmd_set_port_tm_node_parent;
extern cmdline_parse_inst_t cmd_suspend_port_tm_node;
extern cmdline_parse_inst_t cmd_resume_port_tm_node;
extern cmdline_parse_inst_t cmd_port_tm_hierarchy_commit;
+extern cmdline_parse_inst_t cmd_port_tm_mark_vlan_dei;
+extern cmdline_parse_inst_t cmd_port_tm_mark_ip_ecn;
+extern cmdline_parse_inst_t cmd_port_tm_mark_ip_dscp;
#endif /* _CMDLINE_TM_H_ */
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 14ccd686..b9e5dd92 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -50,6 +50,7 @@
#endif
#include <rte_gro.h>
#include <cmdline_parse_etheraddr.h>
+#include <rte_config.h>
#include "testpmd.h"
@@ -74,6 +75,10 @@ static const struct {
};
const struct rss_type_info rss_type_table[] = {
+ { "all", ETH_RSS_IP | ETH_RSS_TCP |
+ ETH_RSS_UDP | ETH_RSS_SCTP |
+ ETH_RSS_L2_PAYLOAD },
+ { "none", 0 },
{ "ipv4", ETH_RSS_IPV4 },
{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
@@ -410,6 +415,8 @@ port_infos_display(portid_t port_id)
rte_eth_dev_get_name_by_port(port_id, name);
printf("\nDevice name: %s", name);
printf("\nDriver name: %s", dev_info.driver_name);
+ if (dev_info.device->devargs && dev_info.device->devargs->args)
+ printf("\nDevargs: %s", dev_info.device->devargs->args);
printf("\nConnect to socket: %u", port->socket_id);
if (port_numa[port_id] != NUMA_NO_CONFIG) {
@@ -461,12 +468,12 @@ port_infos_display(portid_t port_id)
if (dev_info.reta_size > 0)
printf("Redirection table size: %u\n", dev_info.reta_size);
if (!dev_info.flow_type_rss_offloads)
- printf("No flow type is supported.\n");
+ printf("No RSS offload flow type is supported.\n");
else {
uint16_t i;
char *p;
- printf("Supported flow types:\n");
+ printf("Supported RSS offload flow types:\n");
for (i = RTE_ETH_FLOW_UNKNOWN + 1;
i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
@@ -518,6 +525,43 @@ port_infos_display(portid_t port_id)
}
void
+port_summary_header_display(void)
+{
+ uint16_t port_number;
+
+ port_number = rte_eth_dev_count_avail();
+ printf("Number of available ports: %i\n", port_number);
+ printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
+ "Driver", "Status", "Link");
+}
+
+void
+port_summary_display(portid_t port_id)
+{
+ struct ether_addr mac_addr;
+ struct rte_eth_link link;
+ struct rte_eth_dev_info dev_info;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ print_valid_ports();
+ return;
+ }
+
+ rte_eth_link_get_nowait(port_id, &link);
+ rte_eth_dev_info_get(port_id, &dev_info);
+ rte_eth_dev_get_name_by_port(port_id, name);
+ rte_eth_macaddr_get(port_id, &mac_addr);
+
+ printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
+ port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
+ mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
+ mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
+ dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
+ (unsigned int) link.link_speed);
+}
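Given the two format strings above, the new summary display produces output along these lines (values illustrative):

    Number of available ports: 2
    Port MAC Address       Name         Driver         Status   Link
    0    AA:BB:CC:DD:EE:01 0000:03:00.0 net_ixgbe      up       10000Mbps
    1    AA:BB:CC:DD:EE:02 0000:03:00.1 net_ixgbe      down     0Mbps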
+
+void
port_offload_cap_display(portid_t port_id)
{
struct rte_eth_dev_info dev_info;
@@ -543,7 +587,7 @@ port_offload_cap_display(portid_t port_id)
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
printf("Double VLANs stripped: ");
if (ports[port_id].dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_EXTEND)
+ DEV_RX_OFFLOAD_QINQ_STRIP)
printf("on\n");
else
printf("off\n");
@@ -576,8 +620,17 @@ port_offload_cap_display(portid_t port_id)
printf("off\n");
}
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
+ printf("RX SCTP checksum: ");
+ if (ports[port_id].dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCTP_CKSUM)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
- printf("RX Outer IPv4 checksum: ");
+ printf("RX Outer IPv4 checksum: ");
if (ports[port_id].dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
printf("on\n");
@@ -585,19 +638,19 @@ port_offload_cap_display(portid_t port_id)
printf("off\n");
}
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
- printf("Large receive offload: ");
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
+ printf("RX Outer UDP checksum: ");
if (ports[port_id].dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_TCP_LRO)
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
printf("on\n");
else
printf("off\n");
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
- printf("VLAN insert: ");
- if (ports[port_id].dev_conf.txmode.offloads &
- DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
+ printf("Large receive offload: ");
+ if (ports[port_id].dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_TCP_LRO)
printf("on\n");
else
printf("off\n");
@@ -612,6 +665,33 @@ port_offload_cap_display(portid_t port_id)
printf("off\n");
}
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
+ printf("Rx Keep CRC: ");
+ if (ports[port_id].dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_KEEP_CRC)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
+ printf("RX offload security: ");
+ if (ports[port_id].dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
+ printf("VLAN insert: ");
+ if (ports[port_id].dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_VLAN_INSERT)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
printf("Double VLANs insert: ");
if (ports[port_id].dev_conf.txmode.offloads &
@@ -737,6 +817,16 @@ port_offload_cap_display(portid_t port_id)
else
printf("off\n");
}
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+ printf("TX Outer UDP checksum: ");
+ if (ports[port_id].dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
}
int
@@ -984,324 +1074,35 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
/* Generic flow management functions. */
-/** Generate flow_item[] entry. */
-#define MK_FLOW_ITEM(t, s) \
- [RTE_FLOW_ITEM_TYPE_ ## t] = { \
- .name = # t, \
- .size = s, \
- }
-
-/** Information about known flow pattern items. */
-static const struct {
- const char *name;
- size_t size;
-} flow_item[] = {
- MK_FLOW_ITEM(END, 0),
- MK_FLOW_ITEM(VOID, 0),
- MK_FLOW_ITEM(INVERT, 0),
- MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
- MK_FLOW_ITEM(PF, 0),
- MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
- MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
- MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
- MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
- MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
- MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
- MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
- MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
- MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
- MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
- MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
- MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
- MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
- MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
- MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
- MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
- MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
- MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
- MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
- MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
- MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
- MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
- MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
- MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
- MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
- MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
- MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
- MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
- MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
- MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
- sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
- MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
- sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
-};
-
-/** Pattern item specification types. */
-enum item_spec_type {
- ITEM_SPEC,
- ITEM_LAST,
- ITEM_MASK,
-};
-
-/** Compute storage space needed by item specification and copy it. */
-static size_t
-flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
- enum item_spec_type type)
-{
- size_t size = 0;
- const void *data =
- type == ITEM_SPEC ? item->spec :
- type == ITEM_LAST ? item->last :
- type == ITEM_MASK ? item->mask :
- NULL;
-
- if (!item->spec || !data)
- goto empty;
- switch (item->type) {
- union {
- const struct rte_flow_item_raw *raw;
- } spec;
- union {
- const struct rte_flow_item_raw *raw;
- } last;
- union {
- const struct rte_flow_item_raw *raw;
- } mask;
- union {
- const struct rte_flow_item_raw *raw;
- } src;
- union {
- struct rte_flow_item_raw *raw;
- } dst;
- size_t off;
-
- case RTE_FLOW_ITEM_TYPE_RAW:
- spec.raw = item->spec;
- last.raw = item->last ? item->last : item->spec;
- mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
- src.raw = data;
- dst.raw = buf;
- off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
- sizeof(*src.raw->pattern));
- if (type == ITEM_SPEC ||
- (type == ITEM_MASK &&
- ((spec.raw->length & mask.raw->length) >=
- (last.raw->length & mask.raw->length))))
- size = spec.raw->length & mask.raw->length;
- else
- size = last.raw->length & mask.raw->length;
- size = off + size * sizeof(*src.raw->pattern);
- if (dst.raw) {
- memcpy(dst.raw, src.raw, sizeof(*src.raw));
- dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
- src.raw->pattern,
- size - off);
- }
- break;
- default:
- size = flow_item[item->type].size;
- if (buf)
- memcpy(buf, data, size);
- break;
- }
-empty:
- return RTE_ALIGN_CEIL(size, sizeof(double));
-}
-
-/** Generate flow_action[] entry. */
-#define MK_FLOW_ACTION(t, s) \
- [RTE_FLOW_ACTION_TYPE_ ## t] = { \
- .name = # t, \
- .size = s, \
- }
-
-/** Information about known flow actions. */
-static const struct {
- const char *name;
- size_t size;
-} flow_action[] = {
- MK_FLOW_ACTION(END, 0),
- MK_FLOW_ACTION(VOID, 0),
- MK_FLOW_ACTION(PASSTHRU, 0),
- MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
- MK_FLOW_ACTION(FLAG, 0),
- MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
- MK_FLOW_ACTION(DROP, 0),
- MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
- MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
- MK_FLOW_ACTION(PF, 0),
- MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
- MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
- MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
- MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
- MK_FLOW_ACTION(OF_SET_MPLS_TTL,
- sizeof(struct rte_flow_action_of_set_mpls_ttl)),
- MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
- MK_FLOW_ACTION(OF_SET_NW_TTL,
- sizeof(struct rte_flow_action_of_set_nw_ttl)),
- MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
- MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
- MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
- MK_FLOW_ACTION(OF_POP_VLAN, 0),
- MK_FLOW_ACTION(OF_PUSH_VLAN,
- sizeof(struct rte_flow_action_of_push_vlan)),
- MK_FLOW_ACTION(OF_SET_VLAN_VID,
- sizeof(struct rte_flow_action_of_set_vlan_vid)),
- MK_FLOW_ACTION(OF_SET_VLAN_PCP,
- sizeof(struct rte_flow_action_of_set_vlan_pcp)),
- MK_FLOW_ACTION(OF_POP_MPLS,
- sizeof(struct rte_flow_action_of_pop_mpls)),
- MK_FLOW_ACTION(OF_PUSH_MPLS,
- sizeof(struct rte_flow_action_of_push_mpls)),
-};
-
-/** Compute storage space needed by action configuration and copy it. */
-static size_t
-flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
-{
- size_t size = 0;
-
- if (!action->conf)
- goto empty;
- switch (action->type) {
- union {
- const struct rte_flow_action_rss *rss;
- } src;
- union {
- struct rte_flow_action_rss *rss;
- } dst;
- size_t off;
-
- case RTE_FLOW_ACTION_TYPE_RSS:
- src.rss = action->conf;
- dst.rss = buf;
- off = 0;
- if (dst.rss)
- *dst.rss = (struct rte_flow_action_rss){
- .func = src.rss->func,
- .level = src.rss->level,
- .types = src.rss->types,
- .key_len = src.rss->key_len,
- .queue_num = src.rss->queue_num,
- };
- off += sizeof(*src.rss);
- if (src.rss->key_len) {
- off = RTE_ALIGN_CEIL(off, sizeof(double));
- size = sizeof(*src.rss->key) * src.rss->key_len;
- if (dst.rss)
- dst.rss->key = memcpy
- ((void *)((uintptr_t)dst.rss + off),
- src.rss->key, size);
- off += size;
- }
- if (src.rss->queue_num) {
- off = RTE_ALIGN_CEIL(off, sizeof(double));
- size = sizeof(*src.rss->queue) * src.rss->queue_num;
- if (dst.rss)
- dst.rss->queue = memcpy
- ((void *)((uintptr_t)dst.rss + off),
- src.rss->queue, size);
- off += size;
- }
- size = off;
- break;
- default:
- size = flow_action[action->type].size;
- if (buf)
- memcpy(buf, action->conf, size);
- break;
- }
-empty:
- return RTE_ALIGN_CEIL(size, sizeof(double));
-}
-
/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
- const struct rte_flow_action *actions)
-{
- const struct rte_flow_item *item;
- const struct rte_flow_action *action;
- struct port_flow *pf = NULL;
- size_t tmp;
- size_t off1 = 0;
- size_t off2 = 0;
- int err = ENOTSUP;
-
-store:
- item = pattern;
- if (pf)
- pf->pattern = (void *)&pf->data[off1];
- do {
- struct rte_flow_item *dst = NULL;
-
- if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
- !flow_item[item->type].name)
- goto notsup;
- if (pf)
- dst = memcpy(pf->data + off1, item, sizeof(*item));
- off1 += sizeof(*item);
- if (item->spec) {
- if (pf)
- dst->spec = pf->data + off2;
- off2 += flow_item_spec_copy
- (pf ? pf->data + off2 : NULL, item, ITEM_SPEC);
- }
- if (item->last) {
- if (pf)
- dst->last = pf->data + off2;
- off2 += flow_item_spec_copy
- (pf ? pf->data + off2 : NULL, item, ITEM_LAST);
- }
- if (item->mask) {
- if (pf)
- dst->mask = pf->data + off2;
- off2 += flow_item_spec_copy
- (pf ? pf->data + off2 : NULL, item, ITEM_MASK);
- }
- off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
- } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
- off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
- action = actions;
- if (pf)
- pf->actions = (void *)&pf->data[off1];
- do {
- struct rte_flow_action *dst = NULL;
-
- if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
- !flow_action[action->type].name)
- goto notsup;
- if (pf)
- dst = memcpy(pf->data + off1, action, sizeof(*action));
- off1 += sizeof(*action);
- if (action->conf) {
- if (pf)
- dst->conf = pf->data + off2;
- off2 += flow_action_conf_copy
- (pf ? pf->data + off2 : NULL, action);
- }
- off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
- } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
- if (pf != NULL)
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_conv_rule rule = {
+ .attr_ro = attr,
+ .pattern_ro = pattern,
+ .actions_ro = actions,
+ };
+ struct port_flow *pf;
+ int ret;
+
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
+ if (ret < 0)
+ return NULL;
+ pf = calloc(1, offsetof(struct port_flow, rule) + ret);
+ if (!pf) {
+ rte_flow_error_set
+ (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "calloc() failed");
+ return NULL;
+ }
+ if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
+ error) >= 0)
return pf;
- off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
- tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
- pf = calloc(1, tmp + off1 + off2);
- if (pf == NULL)
- err = errno;
- else {
- *pf = (const struct port_flow){
- .size = tmp + off1 + off2,
- .attr = *attr,
- };
- tmp -= offsetof(struct port_flow, data);
- off2 = tmp + off1;
- off1 = tmp;
- goto store;
- }
-notsup:
- rte_errno = err;
+ free(pf);
return NULL;
}
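Worth noting: port_flow_new() now relies on the two-pass rte_flow_conv() idiom. The first RTE_FLOW_CONV_OP_RULE call with a NULL destination only computes the number of bytes the converted rule needs; the second call performs the actual deep copy into the calloc()'d buffer. This replaces the hand-rolled flow_item_spec_copy()/flow_action_conf_copy() machinery removed above.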
@@ -1337,11 +1138,12 @@ port_flow_complain(struct rte_flow_error *error)
errstr = "unknown type";
else
errstr = errstrlist[error->type];
- printf("Caught error type %d (%s): %s%s\n",
+ printf("Caught error type %d (%s): %s%s: %s\n",
error->type, errstr,
error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
error->cause), buf) : "",
- error->message ? error->message : "(no stated reason)");
+ error->message ? error->message : "(no stated reason)",
+ rte_strerror(err));
return -err;
}
@@ -1391,13 +1193,10 @@ port_flow_create(portid_t port_id,
id = port->flow_list->id + 1;
} else
id = 0;
- pf = port_flow_new(attr, pattern, actions);
+ pf = port_flow_new(attr, pattern, actions, &error);
if (!pf) {
- int err = rte_errno;
-
- printf("Cannot allocate flow: %s\n", rte_strerror(err));
rte_flow_destroy(port_id, flow, NULL);
- return -err;
+ return port_flow_complain(&error);
}
pf->next = port->flow_list;
pf->id = id;
@@ -1489,6 +1288,7 @@ port_flow_query(portid_t port_id, uint32_t rule,
union {
struct rte_flow_query_count count;
} query;
+ int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN) ||
port_id == (portid_t)RTE_PORT_ALL)
@@ -1501,11 +1301,11 @@ port_flow_query(portid_t port_id, uint32_t rule,
printf("Flow rule #%u not found\n", rule);
return -ENOENT;
}
- if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
- !flow_action[action->type].name)
- name = "unknown";
- else
- name = flow_action[action->type].name;
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
+ &name, sizeof(name),
+ (void *)(uintptr_t)action->type, &error);
+ if (ret < 0)
+ return port_flow_complain(&error);
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
break;
@@ -1558,48 +1358,63 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
/* Sort flows by group, priority and ID. */
for (pf = port->flow_list; pf != NULL; pf = pf->next) {
struct port_flow **tmp;
+ const struct rte_flow_attr *curr = pf->rule.attr;
if (n) {
/* Filter out unwanted groups. */
for (i = 0; i != n; ++i)
- if (pf->attr.group == group[i])
+ if (curr->group == group[i])
break;
if (i == n)
continue;
}
- tmp = &list;
- while (*tmp &&
- (pf->attr.group > (*tmp)->attr.group ||
- (pf->attr.group == (*tmp)->attr.group &&
- pf->attr.priority > (*tmp)->attr.priority) ||
- (pf->attr.group == (*tmp)->attr.group &&
- pf->attr.priority == (*tmp)->attr.priority &&
- pf->id > (*tmp)->id)))
- tmp = &(*tmp)->tmp;
+ for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
+ const struct rte_flow_attr *comp = (*tmp)->rule.attr;
+
+ if (curr->group > comp->group ||
+ (curr->group == comp->group &&
+ curr->priority > comp->priority) ||
+ (curr->group == comp->group &&
+ curr->priority == comp->priority &&
+ pf->id > (*tmp)->id))
+ continue;
+ break;
+ }
pf->tmp = *tmp;
*tmp = pf;
}
printf("ID\tGroup\tPrio\tAttr\tRule\n");
for (pf = list; pf != NULL; pf = pf->tmp) {
- const struct rte_flow_item *item = pf->pattern;
- const struct rte_flow_action *action = pf->actions;
+ const struct rte_flow_item *item = pf->rule.pattern;
+ const struct rte_flow_action *action = pf->rule.actions;
+ const char *name;
printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
pf->id,
- pf->attr.group,
- pf->attr.priority,
- pf->attr.ingress ? 'i' : '-',
- pf->attr.egress ? 'e' : '-',
- pf->attr.transfer ? 't' : '-');
+ pf->rule.attr->group,
+ pf->rule.attr->priority,
+ pf->rule.attr->ingress ? 'i' : '-',
+ pf->rule.attr->egress ? 'e' : '-',
+ pf->rule.attr->transfer ? 't' : '-');
while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
+ &name, sizeof(name),
+ (void *)(uintptr_t)item->type,
+ NULL) <= 0)
+ name = "[UNKNOWN]";
if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
- printf("%s ", flow_item[item->type].name);
+ printf("%s ", name);
++item;
}
printf("=>");
while (action->type != RTE_FLOW_ACTION_TYPE_END) {
+ if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
+ &name, sizeof(name),
+ (void *)(uintptr_t)action->type,
+ NULL) <= 0)
+ name = "[UNKNOWN]";
if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
- printf(" %s", flow_action[action->type].name);
+ printf(" %s", name);
++action;
}
printf("\n");
@@ -1669,8 +1484,8 @@ ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
- snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
- ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
+ snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
+ port_id, q_id, ring_name);
mz = rte_memzone_lookup(mz_name);
if (mz == NULL)
printf("%s ring memory zoneof (port %d, queue %d) not"
@@ -1927,9 +1742,9 @@ port_rss_reta_info(portid_t port_id,
* key of the port.
*/
void
-port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
+port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
{
- struct rte_eth_rss_conf rss_conf;
+ struct rte_eth_rss_conf rss_conf = {0};
uint8_t rss_key[RSS_HASH_KEY_LENGTH];
uint64_t rss_hf;
uint8_t i;
@@ -1940,7 +1755,6 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
if (dev_info.hash_key_size > 0 &&
dev_info.hash_key_size <= sizeof(rss_key))
@@ -1950,12 +1764,6 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
return;
}
- rss_conf.rss_hf = 0;
- for (i = 0; rss_type_table[i].str; i++) {
- if (!strcmp(rss_info, rss_type_table[i].str))
- rss_conf.rss_hf = rss_type_table[i].rss_type;
- }
-
/* Get RSS hash key if asked to display it */
rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
rss_conf.rss_key_len = hash_key_size;
@@ -2403,6 +2211,23 @@ fwd_config_setup(void)
simple_fwd_config_setup();
}
+static const char *
+mp_alloc_to_str(uint8_t mode)
+{
+ switch (mode) {
+ case MP_ALLOC_NATIVE:
+ return "native";
+ case MP_ALLOC_ANON:
+ return "anon";
+ case MP_ALLOC_XMEM:
+ return "xmem";
+ case MP_ALLOC_XMEM_HUGE:
+ return "xmemhuge";
+ default:
+ return "invalid";
+ }
+}
+
void
pkt_fwd_config_display(struct fwd_config *cfg)
{
@@ -2411,12 +2236,12 @@ pkt_fwd_config_display(struct fwd_config *cfg)
streamid_t sm_id;
printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
- "NUMA support %s, MP over anonymous pages %s\n",
+ "NUMA support %s, MP allocation mode: %s\n",
cfg->fwd_eng->fwd_mode_name,
retry_enabled == 0 ? "" : " with retry",
cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
numa_support == 1 ? "enabled" : "disabled",
- mp_anon != 0 ? "enabled" : "disabled");
+ mp_alloc_to_str(mp_alloc_type));
if (retry_enabled)
printf("TX retry num: %u, delay between TX retries: %uus\n",
@@ -2886,11 +2711,102 @@ set_pkt_forwarding_mode(const char *fwd_mode_name)
}
void
+add_rx_dump_callbacks(portid_t portid)
+{
+ struct rte_eth_dev_info dev_info;
+ uint16_t queue;
+
+ if (port_id_is_invalid(portid, ENABLED_WARN))
+ return;
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
+ if (!ports[portid].rx_dump_cb[queue])
+ ports[portid].rx_dump_cb[queue] =
+ rte_eth_add_rx_callback(portid, queue,
+ dump_rx_pkts, NULL);
+}
+
+void
+add_tx_dump_callbacks(portid_t portid)
+{
+ struct rte_eth_dev_info dev_info;
+ uint16_t queue;
+
+ if (port_id_is_invalid(portid, ENABLED_WARN))
+ return;
+ rte_eth_dev_info_get(portid, &dev_info);
+ for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+ if (!ports[portid].tx_dump_cb[queue])
+ ports[portid].tx_dump_cb[queue] =
+ rte_eth_add_tx_callback(portid, queue,
+ dump_tx_pkts, NULL);
+}
+
+void
+remove_rx_dump_callbacks(portid_t portid)
+{
+ struct rte_eth_dev_info dev_info;
+ uint16_t queue;
+
+ if (port_id_is_invalid(portid, ENABLED_WARN))
+ return;
+ rte_eth_dev_info_get(portid, &dev_info);
+ for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
+ if (ports[portid].rx_dump_cb[queue]) {
+ rte_eth_remove_rx_callback(portid, queue,
+ ports[portid].rx_dump_cb[queue]);
+ ports[portid].rx_dump_cb[queue] = NULL;
+ }
+}
+
+void
+remove_tx_dump_callbacks(portid_t portid)
+{
+ struct rte_eth_dev_info dev_info;
+ uint16_t queue;
+
+ if (port_id_is_invalid(portid, ENABLED_WARN))
+ return;
+ rte_eth_dev_info_get(portid, &dev_info);
+ for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+ if (ports[portid].tx_dump_cb[queue]) {
+ rte_eth_remove_tx_callback(portid, queue,
+ ports[portid].tx_dump_cb[queue]);
+ ports[portid].tx_dump_cb[queue] = NULL;
+ }
+}
+
+void
+configure_rxtx_dump_callbacks(uint16_t verbose)
+{
+ portid_t portid;
+
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+ TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
+ return;
+#endif
+
+ RTE_ETH_FOREACH_DEV(portid)
+ {
+ if (verbose == 1 || verbose > 2)
+ add_rx_dump_callbacks(portid);
+ else
+ remove_rx_dump_callbacks(portid);
+ if (verbose >= 2)
+ add_tx_dump_callbacks(portid);
+ else
+ remove_tx_dump_callbacks(portid);
+ }
+}
+
+void
set_verbose_level(uint16_t vb_level)
{
printf("Change verbose level from %u to %u\n",
(unsigned int) verbose_level, (unsigned int) vb_level);
verbose_level = vb_level;
+ configure_rxtx_dump_callbacks(verbose_level);
}
void
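With the hooks above in place, the long-standing `set verbose` command doubles as the on/off switch for packet dumping; per the checks in configure_rxtx_dump_callbacks(), the levels map as follows (session illustrative):

    testpmd> set verbose 1   (dump received packets only)
    testpmd> set verbose 2   (dump transmitted packets only)
    testpmd> set verbose 3   (dump both directions)
    testpmd> set verbose 0   (remove all dump callbacks)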
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 49482926..dce4b9be 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -468,10 +468,15 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
if (info->outer_l4_proto != IPPROTO_UDP)
return ol_flags;
+ /* Skip SW outer UDP checksum generation if HW supports it */
+ if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
+ ol_flags |= PKT_TX_OUTER_UDP_CKSUM;
+ return ol_flags;
+ }
+
udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
- /* outer UDP checksum is done in software as we have no hardware
- * supporting it today, and no API for it. In the other side, for
+ /* outer UDP checksum is done in software. On the other hand, for
* UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be
* set to zero.
*
@@ -696,6 +701,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint32_t retry;
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
+ uint32_t rx_bad_outer_l4_csum;
struct testpmd_offload_info info;
uint16_t nb_segments = 0;
int ret;
@@ -721,6 +727,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
rx_bad_l4_csum = 0;
+ rx_bad_outer_l4_csum = 0;
gro_enable = gro_ports[fs->rx_port].enable;
txp = &ports[fs->tx_port];
@@ -748,6 +755,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rx_bad_ip_csum += 1;
if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
rx_bad_l4_csum += 1;
+ if (rx_ol_flags & PKT_RX_OUTER_L4_CKSUM_BAD)
+ rx_bad_outer_l4_csum += 1;
/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
* and inner headers */
@@ -826,6 +835,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (info.tunnel_tso_segsz ||
(tx_offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ (tx_offloads &
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6)) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
@@ -898,6 +909,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (info.is_tunnel == 1) {
if ((tx_offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ (tx_offloads &
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",
@@ -982,6 +995,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
fs->tx_packets += nb_tx;
fs->rx_bad_ip_csum += rx_bad_ip_csum;
fs->rx_bad_l4_csum += rx_bad_l4_csum;
+ fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index a0b3be07..6006c60f 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -17,12 +17,14 @@ sources = files('cmdline.c',
'iofwd.c',
'macfwd.c',
'macswap.c',
+ 'noisy_vnf.c',
'parameters.c',
'rxonly.c',
'testpmd.c',
- 'txonly.c')
+ 'txonly.c',
+ 'util.c')
-deps = ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci']
+deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci']
if dpdk_conf.has('RTE_LIBRTE_PDUMP')
deps += 'pdump'
endif
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
new file mode 100644
index 00000000..58c4ee92
--- /dev/null
+++ b/app/test-pmd/noisy_vnf.c
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Red Hat Corp.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_malloc.h>
+
+#include "testpmd.h"
+
+struct noisy_config {
+ struct rte_ring *f;
+ uint64_t prev_time;
+ char *vnf_mem;
+ bool do_buffering;
+ bool do_flush;
+ bool do_sim;
+};
+
+struct noisy_config *noisy_cfg[RTE_MAX_ETHPORTS];
+
+static inline void
+do_write(char *vnf_mem)
+{
+ uint64_t i = rte_rand();
+ uint64_t w = rte_rand();
+
+ vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) /
+ RTE_CACHE_LINE_SIZE)] = w;
+}
+
+static inline void
+do_read(char *vnf_mem)
+{
+ uint64_t i = rte_rand();
+ uint64_t r;
+
+ r = vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) /
+ RTE_CACHE_LINE_SIZE)];
+ r++; /* dummy use of the read value */
+}
+
+static inline void
+do_readwrite(char *vnf_mem)
+{
+ do_read(vnf_mem);
+ do_write(vnf_mem);
+}
+
+/*
+ * Simulate route lookups as defined by commandline parameters
+ */
+static void
+sim_memory_lookups(struct noisy_config *ncf, uint16_t nb_pkts)
+{
+ uint16_t i, j;
+
+ if (!ncf->do_sim)
+ return;
+
+ for (i = 0; i < nb_pkts; i++) {
+ for (j = 0; j < noisy_lkup_num_writes; j++)
+ do_write(ncf->vnf_mem);
+ for (j = 0; j < noisy_lkup_num_reads; j++)
+ do_read(ncf->vnf_mem);
+ for (j = 0; j < noisy_lkup_num_reads_writes; j++)
+ do_readwrite(ncf->vnf_mem);
+ }
+}
+
+static uint16_t
+do_retry(uint16_t nb_rx, uint16_t nb_tx, struct rte_mbuf **pkts,
+ struct fwd_stream *fs)
+{
+ uint32_t retry = 0;
+
+ while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts[nb_tx], nb_rx - nb_tx);
+ }
+
+ return nb_tx;
+}
+
+static uint32_t
+drop_pkts(struct rte_mbuf **pkts, uint16_t nb_rx, uint16_t nb_tx)
+{
+ if (nb_tx < nb_rx) {
+ do {
+ rte_pktmbuf_free(pkts[nb_tx]);
+ } while (++nb_tx < nb_rx);
+ }
+
+ return nb_rx - nb_tx;
+}
+
+/*
+ * Forwarding of packets in noisy VNF mode. Forward packets but perform
+ * memory operations first as specified on cmdline.
+ *
+ * Depending on which commandline parameters are specified we have
+ * different cases to handle:
+ *
+ * 1. No FIFO size was given, so we don't do buffering of incoming
+ * packets. This case is pretty much what iofwd does but in this case
+ * we also do simulation of memory accesses (depending on which
+ * parameters were specified for it).
+ * 2. User wants to buffer packets in a FIFO and send out overflowing
+ * packets.
+ * 3. User wants a FIFO and specifies a time in ms to flush all packets
+ * out of the FIFO.
+ * 4. Cases 2 and 3 combined
+ */
+static void
+pkt_burst_noisy_vnf(struct fwd_stream *fs)
+{
+ const uint64_t freq_khz = rte_get_timer_hz() / 1000;
+ struct noisy_config *ncf = noisy_cfg[fs->rx_port];
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *tmp_pkts[MAX_PKT_BURST];
+ uint16_t nb_deqd = 0;
+ uint16_t nb_rx = 0;
+ uint16_t nb_tx = 0;
+ uint16_t nb_enqd;
+ unsigned int fifo_free;
+ uint64_t delta_ms;
+ bool needs_flush = false;
+ uint64_t now;
+
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
+ pkts_burst, nb_pkt_per_burst);
+ if (unlikely(nb_rx == 0))
+ goto flush;
+ fs->rx_packets += nb_rx;
+
+ if (!ncf->do_buffering) {
+ sim_memory_lookups(ncf, nb_rx);
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ pkts_burst, nb_rx);
+ if (unlikely(nb_tx < nb_rx) && fs->retry_enabled)
+ nb_tx = do_retry(nb_rx, nb_tx, pkts_burst, fs);
+ fs->tx_packets += nb_tx;
+ fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
+ return;
+ }
+
+ fifo_free = rte_ring_free_count(ncf->f);
+ if (fifo_free >= nb_rx) {
+ nb_enqd = rte_ring_enqueue_burst(ncf->f,
+ (void **) pkts_burst, nb_rx, NULL);
+ if (nb_enqd < nb_rx)
+ fs->fwd_dropped += drop_pkts(pkts_burst,
+ nb_rx, nb_enqd);
+ } else {
+ nb_deqd = rte_ring_dequeue_burst(ncf->f,
+ (void **) tmp_pkts, nb_rx, NULL);
+ nb_enqd = rte_ring_enqueue_burst(ncf->f,
+ (void **) pkts_burst, nb_deqd, NULL);
+ if (nb_deqd > 0) {
+ nb_tx = rte_eth_tx_burst(fs->tx_port,
+ fs->tx_queue, tmp_pkts,
+ nb_deqd);
+ if (unlikely(nb_tx < nb_deqd) && fs->retry_enabled)
+ nb_tx = do_retry(nb_deqd, nb_tx, tmp_pkts, fs);
+ fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, nb_tx);
+ }
+ }
+
+ sim_memory_lookups(ncf, nb_enqd);
+
+flush:
+ if (ncf->do_flush) {
+ if (!ncf->prev_time)
+ now = ncf->prev_time = rte_get_timer_cycles();
+ else
+ now = rte_get_timer_cycles();
+ delta_ms = (now - ncf->prev_time) / freq_khz;
+ needs_flush = delta_ms >= noisy_tx_sw_buf_flush_time &&
+ noisy_tx_sw_buf_flush_time > 0 && !nb_tx;
+ }
+ while (needs_flush && !rte_ring_empty(ncf->f)) {
+ unsigned int sent;
+ nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **)tmp_pkts,
+ MAX_PKT_BURST, NULL);
+ sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ tmp_pkts, nb_deqd);
+ if (unlikely(sent < nb_deqd) && fs->retry_enabled)
+ sent = do_retry(nb_deqd, sent, tmp_pkts, fs);
+ fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
+ ncf->prev_time = rte_get_timer_cycles();
+ }
+}
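
The flush path converts a TSC delta into milliseconds by dividing by ticks-per-millisecond (rte_get_timer_hz() / 1000), and only flushes when the configured flushtime has elapsed and nothing was sent this iteration. The same arithmetic as a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* true when flush_ms milliseconds have elapsed between prev and now */
    static bool
    flush_due(uint64_t now, uint64_t prev, uint64_t tsc_hz, uint64_t flush_ms)
    {
        uint64_t freq_khz = tsc_hz / 1000;        /* timer ticks per ms */
        uint64_t delta_ms = (now - prev) / freq_khz;

        return flush_ms > 0 && delta_ms >= flush_ms;
    }
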
+
+#define NOISY_STRSIZE 256
+#define NOISY_RING "noisy_ring_%d"
+
+static void
+noisy_fwd_end(portid_t pi)
+{
+ rte_ring_free(noisy_cfg[pi]->f);
+ rte_free(noisy_cfg[pi]->vnf_mem);
+ rte_free(noisy_cfg[pi]);
+}
+
+static void
+noisy_fwd_begin(portid_t pi)
+{
+ struct noisy_config *n;
+ char name[NOISY_STRSIZE];
+
+ noisy_cfg[pi] = rte_zmalloc("testpmd noisy fifo and timers",
+ sizeof(struct noisy_config),
+ RTE_CACHE_LINE_SIZE);
+ if (noisy_cfg[pi] == NULL) {
+ rte_exit(EXIT_FAILURE,
+ "rte_zmalloc(%d) struct noisy_config) failed\n",
+ (int) pi);
+ }
+ n = noisy_cfg[pi];
+ n->do_buffering = noisy_tx_sw_bufsz > 0;
+ n->do_sim = noisy_lkup_num_writes + noisy_lkup_num_reads +
+ noisy_lkup_num_reads_writes;
+ n->do_flush = noisy_tx_sw_buf_flush_time > 0;
+
+ if (n->do_buffering) {
+ snprintf(name, NOISY_STRSIZE, NOISY_RING, pi);
+ n->f = rte_ring_create(name, noisy_tx_sw_bufsz,
+ rte_socket_id(), 0);
+ if (!n->f)
+ rte_exit(EXIT_FAILURE,
+ "rte_ring_create(%d), size %d) failed\n",
+ (int) pi,
+ noisy_tx_sw_bufsz);
+ }
+ if (noisy_lkup_mem_sz > 0) {
+ n->vnf_mem = (char *) rte_zmalloc("vnf sim memory",
+ noisy_lkup_mem_sz * 1024 * 1024,
+ RTE_CACHE_LINE_SIZE);
+ if (!n->vnf_mem)
+ rte_exit(EXIT_FAILURE,
+ "rte_zmalloc(%" PRIu64 ") for vnf memory) failed\n",
+ noisy_lkup_mem_sz);
+ } else if (n->do_sim) {
+ rte_exit(EXIT_FAILURE,
+ "--noisy-lkup-memory-size must be > 0\n");
+ }
+}
+
+struct fwd_engine noisy_vnf_engine = {
+ .fwd_mode_name = "noisy",
+ .port_fwd_begin = noisy_fwd_begin,
+ .port_fwd_end = noisy_fwd_end,
+ .packet_fwd = pkt_burst_noisy_vnf,
+};
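
With the engine registered under fwd_mode_name "noisy", the mode is selected like any other forwarding mode via testpmd's existing --forward-mode option. An illustrative invocation (option values hypothetical, option names as added in parameters.c below):

    testpmd -l 0-3 -n 4 -- --forward-mode=noisy \
        --noisy-tx-sw-buffer-size=512 --noisy-tx-sw-buffer-flushtime=10 \
        --noisy-lkup-memory=8 --noisy-lkup-num-reads=20 --noisy-lkup-num-writes=20
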
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 962fad78..38b41976 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -190,6 +190,17 @@ usage(char* progname)
printf(" --vxlan-gpe-port=N: UPD port of tunnel VXLAN-GPE\n");
printf(" --mlockall: lock all memory\n");
printf(" --no-mlockall: do not lock all memory\n");
+ printf(" --mp-alloc <native|anon|xmem|xmemhuge>: mempool allocation method.\n"
+ " native: use regular DPDK memory to create and populate mempool\n"
+ " anon: use regular DPDK memory to create and anonymous memory to populate mempool\n"
+ " xmem: use anonymous memory to create and populate mempool\n"
+ " xmemhuge: use anonymous hugepage memory to create and populate mempool\n");
+ printf(" --noisy-tx-sw-buffer-size=N: size of FIFO buffer\n");
+ printf(" --noisy-tx-sw-buffer-flushtime=N: flush FIFO after N ms\n");
+ printf(" --noisy-lkup-memory=N: allocate N MB of VNF memory\n");
+ printf(" --noisy-lkup-num-writes=N: do N random writes per packet\n");
+ printf(" --noisy-lkup-num-reads=N: do N random reads per packet\n");
+ printf(" --noisy-lkup-num-writes=N: do N random reads and writes per packet\n");
}
#ifdef RTE_LIBRTE_CMDLINE
@@ -405,8 +416,11 @@ parse_portnuma_config(const char *q_arg)
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
if (new_socket_id(socket_id)) {
- print_invalid_socket_id_error();
- return -1;
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ print_invalid_socket_id_error();
+ return -1;
+ }
+ socket_ids[num_sockets++] = socket_id;
}
port_numa[port_id] = socket_id;
}
@@ -462,8 +476,11 @@ parse_ringnuma_config(const char *q_arg)
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
if (new_socket_id(socket_id)) {
- print_invalid_socket_id_error();
- return -1;
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ print_invalid_socket_id_error();
+ return -1;
+ }
+ socket_ids[num_sockets++] = socket_id;
}
ring_flag = (uint8_t)int_fld[FLD_FLAG];
if ((ring_flag < RX_RING_ONLY) || (ring_flag > RXTX_RING)) {
@@ -625,6 +642,13 @@ launch_args_parse(int argc, char** argv)
{ "vxlan-gpe-port", 1, 0, 0 },
{ "mlockall", 0, 0, 0 },
{ "no-mlockall", 0, 0, 0 },
+ { "mp-alloc", 1, 0, 0 },
+ { "noisy-tx-sw-buffer-size", 1, 0, 0 },
+ { "noisy-tx-sw-buffer-flushtime", 1, 0, 0 },
+ { "noisy-lkup-memory", 1, 0, 0 },
+ { "noisy-lkup-num-writes", 1, 0, 0 },
+ { "noisy-lkup-num-reads", 1, 0, 0 },
+ { "noisy-lkup-num-reads-writes", 1, 0, 0 },
{ 0, 0, 0, 0 },
};
@@ -743,7 +767,22 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "numa"))
numa_support = 1;
if (!strcmp(lgopts[opt_idx].name, "mp-anon")) {
- mp_anon = 1;
+ mp_alloc_type = MP_ALLOC_ANON;
+ }
+ if (!strcmp(lgopts[opt_idx].name, "mp-alloc")) {
+ if (!strcmp(optarg, "native"))
+ mp_alloc_type = MP_ALLOC_NATIVE;
+ else if (!strcmp(optarg, "anon"))
+ mp_alloc_type = MP_ALLOC_ANON;
+ else if (!strcmp(optarg, "xmem"))
+ mp_alloc_type = MP_ALLOC_XMEM;
+ else if (!strcmp(optarg, "xmemhuge"))
+ mp_alloc_type = MP_ALLOC_XMEM_HUGE;
+ else
+ rte_exit(EXIT_FAILURE,
+ "mp-alloc %s invalid - must be: "
+ "native, anon, xmem or xmemhuge\n",
+ optarg);
}
if (!strcmp(lgopts[opt_idx].name, "port-numa-config")) {
if (parse_portnuma_config(optarg))
@@ -878,10 +917,8 @@ launch_args_parse(int argc, char** argv)
" must be >= 0\n", n);
}
#endif
- if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip")) {
- rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
- }
if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
@@ -1147,6 +1184,60 @@ launch_args_parse(int argc, char** argv)
do_mlockall = 1;
if (!strcmp(lgopts[opt_idx].name, "no-mlockall"))
do_mlockall = 0;
+ if (!strcmp(lgopts[opt_idx].name,
+ "noisy-tx-sw-buffer-size")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ noisy_tx_sw_bufsz = n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "noisy-tx-sw-buffer-size must be >= 0\n");
+ }
+ if (!strcmp(lgopts[opt_idx].name,
+ "noisy-tx-sw-buffer-flushtime")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ noisy_tx_sw_buf_flush_time = n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "noisy-tx-sw-buffer-flushtime must be >= 0\n");
+ }
+ if (!strcmp(lgopts[opt_idx].name,
+ "noisy-lkup-memory")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ noisy_lkup_mem_sz = n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "noisy-lkup-memory must be >= 0\n");
+ }
+ if (!strcmp(lgopts[opt_idx].name,
+ "noisy-lkup-num-writes")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ noisy_lkup_num_writes = n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "noisy-lkup-num-writes must be >= 0\n");
+ }
+ if (!strcmp(lgopts[opt_idx].name,
+ "noisy-lkup-num-reads")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ noisy_lkup_num_reads = n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "noisy-lkup-num-reads must be >= 0\n");
+ }
+ if (!strcmp(lgopts[opt_idx].name,
+ "noisy-lkup-num-reads-writes")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ noisy_lkup_num_reads_writes = n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "noisy-lkup-num-reads-writes must be >= 0\n");
+ }
break;
case 'h':
usage(argv[0]);
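
The chain of strcmp()s for --mp-alloc above maps option strings onto the MP_ALLOC_* enum added in testpmd.h; the same mapping could be written table-driven, as in this sketch (parse_mp_alloc and the table are illustrative, not part of the patch):

    #include <stdint.h>
    #include <string.h>
    #include <rte_common.h> /* RTE_DIM */

    static const struct {
        const char *name;
        uint8_t type;
    } mp_alloc_map[] = {
        { "native",   MP_ALLOC_NATIVE },
        { "anon",     MP_ALLOC_ANON },
        { "xmem",     MP_ALLOC_XMEM },
        { "xmemhuge", MP_ALLOC_XMEM_HUGE },
    };

    /* returns the MP_ALLOC_* value, or -1 for an unknown string */
    static int
    parse_mp_alloc(const char *arg)
    {
        unsigned int i;

        for (i = 0; i < RTE_DIM(mp_alloc_map); i++)
            if (strcmp(arg, mp_alloc_map[i].name) == 0)
                return mp_alloc_map[i].type;
        return -1;
    }
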
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index a93d8061..5c65fc42 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -40,14 +40,6 @@
#include "testpmd.h"
-static inline void
-print_ether_addr(const char *what, struct ether_addr *eth_addr)
-{
- char buf[ETHER_ADDR_FMT_SIZE];
- ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
- printf("%s%s", what, buf);
-}
-
/*
* Received a burst of packets.
*/
@@ -55,16 +47,8 @@ static void
pkt_burst_receive(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_mbuf *mb;
- struct ether_hdr *eth_hdr;
- uint16_t eth_type;
- uint64_t ol_flags;
uint16_t nb_rx;
- uint16_t i, packet_type;
- uint16_t is_encapsulation;
- char buf[256];
- struct rte_net_hdr_lens hdr_lens;
- uint32_t sw_packet_type;
+ uint16_t i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -86,124 +70,8 @@ pkt_burst_receive(struct fwd_stream *fs)
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
fs->rx_packets += nb_rx;
-
- /*
- * Dump each received packet if verbose_level > 0.
- */
- if (verbose_level > 0)
- printf("port %u/queue %u: received %u packets\n",
- fs->rx_port,
- (unsigned) fs->rx_queue,
- (unsigned) nb_rx);
- for (i = 0; i < nb_rx; i++) {
- mb = pkts_burst[i];
- if (verbose_level == 0) {
- rte_pktmbuf_free(mb);
- continue;
- }
- eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
- eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
- ol_flags = mb->ol_flags;
- packet_type = mb->packet_type;
- is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type);
-
- print_ether_addr(" src=", &eth_hdr->s_addr);
- print_ether_addr(" - dst=", &eth_hdr->d_addr);
- printf(" - type=0x%04x - length=%u - nb_segs=%d",
- eth_type, (unsigned) mb->pkt_len,
- (int)mb->nb_segs);
- if (ol_flags & PKT_RX_RSS_HASH) {
- printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
- printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
- }
- if (ol_flags & PKT_RX_FDIR) {
- printf(" - FDIR matched ");
- if (ol_flags & PKT_RX_FDIR_ID)
- printf("ID=0x%x",
- mb->hash.fdir.hi);
- else if (ol_flags & PKT_RX_FDIR_FLX)
- printf("flex bytes=0x%08x %08x",
- mb->hash.fdir.hi, mb->hash.fdir.lo);
- else
- printf("hash=0x%x ID=0x%x ",
- mb->hash.fdir.hash, mb->hash.fdir.id);
- }
- if (ol_flags & PKT_RX_TIMESTAMP)
- printf(" - timestamp %"PRIu64" ", mb->timestamp);
- if (ol_flags & PKT_RX_VLAN_STRIPPED)
- printf(" - VLAN tci=0x%x", mb->vlan_tci);
- if (ol_flags & PKT_RX_QINQ_STRIPPED)
- printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
- mb->vlan_tci, mb->vlan_tci_outer);
- if (mb->packet_type) {
- rte_get_ptype_name(mb->packet_type, buf, sizeof(buf));
- printf(" - hw ptype: %s", buf);
- }
- sw_packet_type = rte_net_get_ptype(mb, &hdr_lens,
- RTE_PTYPE_ALL_MASK);
- rte_get_ptype_name(sw_packet_type, buf, sizeof(buf));
- printf(" - sw ptype: %s", buf);
- if (sw_packet_type & RTE_PTYPE_L2_MASK)
- printf(" - l2_len=%d", hdr_lens.l2_len);
- if (sw_packet_type & RTE_PTYPE_L3_MASK)
- printf(" - l3_len=%d", hdr_lens.l3_len);
- if (sw_packet_type & RTE_PTYPE_L4_MASK)
- printf(" - l4_len=%d", hdr_lens.l4_len);
- if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK)
- printf(" - tunnel_len=%d", hdr_lens.tunnel_len);
- if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK)
- printf(" - inner_l2_len=%d", hdr_lens.inner_l2_len);
- if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK)
- printf(" - inner_l3_len=%d", hdr_lens.inner_l3_len);
- if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK)
- printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len);
- if (is_encapsulation) {
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
- struct udp_hdr *udp_hdr;
- uint8_t l2_len;
- uint8_t l3_len;
- uint8_t l4_len;
- uint8_t l4_proto;
- struct vxlan_hdr *vxlan_hdr;
-
- l2_len = sizeof(struct ether_hdr);
-
- /* Do not support ipv4 option field */
- if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
- l3_len = sizeof(struct ipv4_hdr);
- ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
- struct ipv4_hdr *,
- l2_len);
- l4_proto = ipv4_hdr->next_proto_id;
- } else {
- l3_len = sizeof(struct ipv6_hdr);
- ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
- struct ipv6_hdr *,
- l2_len);
- l4_proto = ipv6_hdr->proto;
- }
- if (l4_proto == IPPROTO_UDP) {
- udp_hdr = rte_pktmbuf_mtod_offset(mb,
- struct udp_hdr *,
- l2_len + l3_len);
- l4_len = sizeof(struct udp_hdr);
- vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
- struct vxlan_hdr *,
- l2_len + l3_len + l4_len);
-
- printf(" - VXLAN packet: packet type =%d, "
- "Destination UDP port =%d, VNI = %d",
- packet_type, RTE_BE_TO_CPU_16(udp_hdr->dst_port),
- rte_be_to_cpu_32(vxlan_hdr->vx_vni) >> 8);
- }
- }
- printf(" - Receive queue=0x%x", (unsigned) fs->rx_queue);
- printf("\n");
- rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf));
- printf(" ol_flags: %s\n", buf);
- rte_pktmbuf_free(mb);
- }
+ for (i = 0; i < nb_rx; i++)
+ rte_pktmbuf_free(pkts_burst[i]);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
end_tsc = rte_rdtsc();
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index ee48db2a..9c0edcae 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -27,6 +27,7 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
+#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
@@ -63,6 +64,22 @@
#include "testpmd.h"
+#ifndef MAP_HUGETLB
+/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
+#define HUGE_FLAG (0x40000)
+#else
+#define HUGE_FLAG MAP_HUGETLB
+#endif
+
+#ifndef MAP_HUGE_SHIFT
+/* older kernels (or FreeBSD) will not have this define */
+#define HUGE_SHIFT (26)
+#else
+#define HUGE_SHIFT MAP_HUGE_SHIFT
+#endif
+
+#define EXTMEM_HEAP_NAME "extmem"
+
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */
@@ -88,9 +105,13 @@ uint8_t numa_support = 1; /**< numa enabled by default */
uint8_t socket_num = UMA_NO_CONFIG;
/*
- * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
+ * Select mempool allocation type:
+ * - native: use regular DPDK memory
+ * - anon: use regular DPDK memory to create mempool, but populate using
+ * anonymous memory (may not be IOVA-contiguous)
+ * - xmem: use externally allocated hugepage memory
*/
-uint8_t mp_anon = 0;
+uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
/*
* Store specified sockets on which memory pool to be used by ports
@@ -157,6 +178,7 @@ struct fwd_engine * fwd_engines[] = {
&tx_only_engine,
&csum_fwd_engine,
&icmp_echo_engine,
+ &noisy_vnf_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
&softnic_fwd_engine,
#endif
@@ -253,6 +275,40 @@ int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
/*
+ * Configurable value of buffered packets before sending.
+ */
+uint16_t noisy_tx_sw_bufsz;
+
+/*
+ * Configurable value of packet buffer timeout.
+ */
+uint16_t noisy_tx_sw_buf_flush_time;
+
+/*
+ * Configurable value for size of VNF internal memory area
+ * used for simulating noisy neighbour behaviour
+ */
+uint64_t noisy_lkup_mem_sz;
+
+/*
+ * Configurable value of number of random writes done in
+ * VNF simulation memory area.
+ */
+uint64_t noisy_lkup_num_writes;
+
+/*
+ * Configurable value of number of random reads done in
+ * VNF simulation memory area.
+ */
+uint64_t noisy_lkup_num_reads;
+
+/*
+ * Configurable value of number of random reads/writes done in
+ * VNF simulation memory area.
+ */
+uint64_t noisy_lkup_num_reads_writes;
+
+/*
* Receive Side Scaling (RSS) configuration.
*/
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
@@ -289,6 +345,24 @@ uint8_t rmv_interrupt = 1; /* enabled by default */
uint8_t hot_plug = 0; /**< hotplug disabled by default. */
+/* After attach, port setup is called on event or by iterator */
+bool setup_on_probe_event = true;
+
+/* Pretty printing of ethdev events */
+static const char * const eth_event_desc[] = {
+ [RTE_ETH_EVENT_UNKNOWN] = "unknown",
+ [RTE_ETH_EVENT_INTR_LSC] = "link state change",
+ [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
+ [RTE_ETH_EVENT_INTR_RESET] = "reset",
+ [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
+ [RTE_ETH_EVENT_IPSEC] = "IPsec",
+ [RTE_ETH_EVENT_MACSEC] = "MACsec",
+ [RTE_ETH_EVENT_INTR_RMV] = "device removal",
+ [RTE_ETH_EVENT_NEW] = "device probed",
+ [RTE_ETH_EVENT_DESTROY] = "device released",
+ [RTE_ETH_EVENT_MAX] = NULL,
+};
+
/*
* Display or mask ether events
* Default to all events except VF_MBOX
@@ -334,7 +408,6 @@ lcoreid_t latencystats_lcore_id = -1;
*/
struct rte_eth_rxmode rx_mode = {
.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};
struct rte_eth_txmode tx_mode = {
@@ -426,18 +499,16 @@ struct nvgre_encap_conf nvgre_encap_conf = {
};
/* Forward function declarations */
+static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
enum rte_eth_event_type type,
void *param, void *ret_param);
-static void eth_dev_event_callback(char *device_name,
+static void eth_dev_event_callback(const char *device_name,
enum rte_dev_event_type type,
void *param);
-static int eth_dev_event_callback_register(void);
-static int eth_dev_event_callback_unregister(void);
-
/*
* Check if all the ports are started.
@@ -476,6 +547,8 @@ set_default_fwd_lcores_config(void)
nb_lc = 0;
for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
sock_num = rte_lcore_to_socket_id(i);
if (new_socket_id(sock_num)) {
if (num_sockets >= RTE_MAX_NUMA_NODES) {
@@ -485,8 +558,6 @@ set_default_fwd_lcores_config(void)
}
socket_ids[num_sockets++] = sock_num;
}
- if (!rte_lcore_is_enabled(i))
- continue;
if (i == rte_get_master_lcore())
continue;
fwd_lcores_cpuids[nb_lc++] = i;
@@ -513,9 +584,21 @@ set_default_fwd_ports_config(void)
portid_t pt_id;
int i = 0;
- RTE_ETH_FOREACH_DEV(pt_id)
+ RTE_ETH_FOREACH_DEV(pt_id) {
fwd_ports_ids[i++] = pt_id;
+ /* Update sockets info according to the attached device */
+ int socket_id = rte_eth_dev_socket_id(pt_id);
+ if (socket_id >= 0 && new_socket_id(socket_id)) {
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ rte_exit(EXIT_FAILURE,
+ "Total sockets greater than %u\n",
+ RTE_MAX_NUMA_NODES);
+ }
+ socket_ids[num_sockets++] = socket_id;
+ }
+ }
+
nb_cfg_ports = nb_ports;
nb_fwd_ports = nb_ports;
}
@@ -528,6 +611,236 @@ set_def_fwd_config(void)
set_default_fwd_ports_config();
}
+/* extremely pessimistic estimation of memory required to create a mempool */
+static int
+calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
+{
+ unsigned int n_pages, mbuf_per_pg, leftover;
+ uint64_t total_mem, mbuf_mem, obj_sz;
+
+ /* there is no good way to predict how much space the mempool will
+ * occupy because it will allocate chunks on the fly, and some of those
+ * will come from default DPDK memory while some will come from our
+ * external memory, so just assume 128MB will be enough for everyone.
+ */
+ uint64_t hdr_mem = 128 << 20;
+
+ /* account for possible non-contiguousness */
+ obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
+ if (obj_sz > pgsz) {
+ TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
+ return -1;
+ }
+
+ mbuf_per_pg = pgsz / obj_sz;
+ leftover = (nb_mbufs % mbuf_per_pg) > 0;
+ n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
+
+ mbuf_mem = n_pages * pgsz;
+
+ total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
+
+ if (total_mem > SIZE_MAX) {
+ TESTPMD_LOG(ERR, "Memory size too big\n");
+ return -1;
+ }
+ *out = (size_t)total_mem;
+
+ return 0;
+}
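
A worked example of the estimate, using hypothetical inputs (2 MB pages, 2304-byte mempool objects, 256K mbufs; none of these values come from the patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pgsz = 2ULL << 20;      /* 2 MB page */
        uint64_t obj_sz = 2304;          /* object size incl. mempool header */
        uint64_t nb_mbufs = 262144;
        uint64_t hdr_mem = 128ULL << 20; /* fixed 128 MB overhead */

        uint64_t mbuf_per_pg = pgsz / obj_sz;              /* 910 */
        uint64_t n_pages = nb_mbufs / mbuf_per_pg +
                ((nb_mbufs % mbuf_per_pg) > 0);            /* 289 */
        uint64_t total = hdr_mem + n_pages * pgsz;         /* 706 MB */

        printf("%" PRIu64 " pages, %" PRIu64 " MB\n", n_pages, total >> 20);
        return 0;
    }
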
+
+static inline uint32_t
+bsf64(uint64_t v)
+{
+ return (uint32_t)__builtin_ctzll(v);
+}
+
+static inline uint32_t
+log2_u64(uint64_t v)
+{
+ if (v == 0)
+ return 0;
+ v = rte_align64pow2(v);
+ return bsf64(v);
+}
+
+static int
+pagesz_flags(uint64_t page_sz)
+{
+ /* as per mmap() manpage, all page sizes are log2 of page size
+ * shifted by MAP_HUGE_SHIFT
+ */
+ int log2 = log2_u64(page_sz);
+
+ return (log2 << HUGE_SHIFT);
+}
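
Concretely, a 2 MB page has log2 = 21, so pagesz_flags() returns 21 << 26, matching the kernel's MAP_HUGE_2MB encoding; 1 GB yields 30 << 26 (MAP_HUGE_1GB). A quick self-check of that claim (the shift constant 26 mirrors HUGE_SHIFT above):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* same math as pagesz_flags(): ctz(page size) << HUGE_SHIFT */
        uint32_t two_mb = (uint32_t)__builtin_ctzll(2ULL << 20); /* 21 */
        uint32_t one_gb = (uint32_t)__builtin_ctzll(1ULL << 30); /* 30 */

        assert((two_mb << 26) == (21u << 26)); /* MAP_HUGE_2MB */
        assert((one_gb << 26) == (30u << 26)); /* MAP_HUGE_1GB */
        return 0;
    }
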
+
+static void *
+alloc_mem(size_t memsz, size_t pgsz, bool huge)
+{
+ void *addr;
+ int flags;
+
+ /* allocate anonymous hugepages */
+ flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ if (huge)
+ flags |= HUGE_FLAG | pagesz_flags(pgsz);
+
+ addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
+ if (addr == MAP_FAILED)
+ return NULL;
+
+ return addr;
+}
+
+struct extmem_param {
+ void *addr;
+ size_t len;
+ size_t pgsz;
+ rte_iova_t *iova_table;
+ unsigned int iova_table_len;
+};
+
+static int
+create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
+ bool huge)
+{
+ uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
+ RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
+ unsigned int cur_page, n_pages, pgsz_idx;
+ size_t mem_sz, cur_pgsz;
+ rte_iova_t *iovas = NULL;
+ void *addr;
+ int ret;
+
+ for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
+ /* skip anything that is too big */
+ if (pgsizes[pgsz_idx] > SIZE_MAX)
+ continue;
+
+ cur_pgsz = pgsizes[pgsz_idx];
+
+ /* if we were told not to allocate hugepages, override */
+ if (!huge)
+ cur_pgsz = sysconf(_SC_PAGESIZE);
+
+ ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
+ return -1;
+ }
+
+ /* allocate our memory */
+ addr = alloc_mem(mem_sz, cur_pgsz, huge);
+
+ /* if we couldn't allocate memory with a specified page size,
+ * that doesn't mean we can't do it with other page sizes, so
+ * try another one.
+ */
+ if (addr == NULL)
+ continue;
+
+ /* store IOVA addresses for every page in this memory area */
+ n_pages = mem_sz / cur_pgsz;
+
+ iovas = malloc(sizeof(*iovas) * n_pages);
+
+ if (iovas == NULL) {
+ TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
+ goto fail;
+ }
+ /* lock memory if it's not huge pages */
+ if (!huge)
+ mlock(addr, mem_sz);
+
+ /* populate IOVA addresses */
+ for (cur_page = 0; cur_page < n_pages; cur_page++) {
+ rte_iova_t iova;
+ size_t offset;
+ void *cur;
+
+ offset = cur_pgsz * cur_page;
+ cur = RTE_PTR_ADD(addr, offset);
+
+ /* touch the page before getting its IOVA */
+ *(volatile char *)cur = 0;
+
+ iova = rte_mem_virt2iova(cur);
+
+ iovas[cur_page] = iova;
+ }
+
+ break;
+ }
+ /* if we couldn't allocate anything */
+ if (iovas == NULL)
+ return -1;
+
+ param->addr = addr;
+ param->len = mem_sz;
+ param->pgsz = cur_pgsz;
+ param->iova_table = iovas;
+ param->iova_table_len = n_pages;
+
+ return 0;
+fail:
+ if (iovas)
+ free(iovas);
+ if (addr)
+ munmap(addr, mem_sz);
+
+ return -1;
+}
+
+static int
+setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
+{
+ struct extmem_param param;
+ int socket_id, ret;
+
+ memset(&param, 0, sizeof(param));
+
+ /* check if our heap exists */
+ socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
+ if (socket_id < 0) {
+ /* create our heap */
+ ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot create heap\n");
+ return -1;
+ }
+ }
+
+ ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot create memory area\n");
+ return -1;
+ }
+
+ /* we now have a valid memory area, so add it to heap */
+ ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
+ param.addr, param.len, param.iova_table,
+ param.iova_table_len, param.pgsz);
+
+ /* when using VFIO, memory is automatically mapped for DMA by EAL */
+
+ /* not needed any more */
+ free(param.iova_table);
+
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
+ munmap(param.addr, param.len);
+ return -1;
+ }
+
+ /* success */
+
+ TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
+ param.len >> 20);
+
+ return 0;
+}
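
Boiled down, the xmem path is: ensure the named heap exists, add the anonymously mapped area together with its per-page IOVA table, then use the heap's synthetic socket id as the mempool's socket. A condensed sketch of that call sequence (error handling elided; addr, len, iovas, n_pages and pgsz stand for the values produced by create_extmem()):

    if (rte_malloc_heap_get_socket("extmem") < 0)
        rte_malloc_heap_create("extmem");

    rte_malloc_heap_memory_add("extmem", addr, len, iovas, n_pages, pgsz);

    int heap_socket = rte_malloc_heap_get_socket("extmem");
    struct rte_mempool *mp = rte_pktmbuf_pool_create("mb_pool", nb_mbuf,
            mb_mempool_cache, 0, mbuf_seg_size, heap_socket);
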
+
/*
* Configuration initialisation done once at init time.
*/
@@ -546,27 +859,59 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
pool_name, nb_mbuf, mbuf_seg_size, socket_id);
- if (mp_anon != 0) {
- rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
- mb_size, (unsigned) mb_mempool_cache,
- sizeof(struct rte_pktmbuf_pool_private),
- socket_id, 0);
- if (rte_mp == NULL)
- goto err;
-
- if (rte_mempool_populate_anon(rte_mp) == 0) {
- rte_mempool_free(rte_mp);
- rte_mp = NULL;
- goto err;
- }
- rte_pktmbuf_pool_init(rte_mp, NULL);
- rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
- } else {
- /* wrapper to rte_mempool_create() */
- TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
- rte_mbuf_best_mempool_ops());
- rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
- mb_mempool_cache, 0, mbuf_seg_size, socket_id);
+ switch (mp_alloc_type) {
+ case MP_ALLOC_NATIVE:
+ {
+ /* wrapper to rte_mempool_create() */
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size, socket_id);
+ break;
+ }
+ case MP_ALLOC_ANON:
+ {
+ rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
+ mb_size, (unsigned int) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ socket_id, 0);
+ if (rte_mp == NULL)
+ goto err;
+
+ if (rte_mempool_populate_anon(rte_mp) == 0) {
+ rte_mempool_free(rte_mp);
+ rte_mp = NULL;
+ goto err;
+ }
+ rte_pktmbuf_pool_init(rte_mp, NULL);
+ rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
+ break;
+ }
+ case MP_ALLOC_XMEM:
+ case MP_ALLOC_XMEM_HUGE:
+ {
+ int heap_socket;
+ bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
+
+ if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
+ rte_exit(EXIT_FAILURE, "Could not create external memory\n");
+
+ heap_socket =
+ rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
+ if (heap_socket < 0)
+ rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
+
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size,
+ heap_socket);
+ break;
+ }
+ default:
+ {
+ rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
+ }
}
err:
@@ -707,12 +1052,6 @@ init_config(void)
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
- if (numa_support) {
- memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- }
-
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
@@ -739,23 +1078,26 @@ init_config(void)
port->dev_conf.rxmode = rx_mode;
rte_eth_dev_info_get(pid, &port->dev_info);
- if (!(port->dev_info.rx_offload_capa &
- DEV_RX_OFFLOAD_CRC_STRIP))
- port->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_CRC_STRIP;
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ if (!(port->dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_MATCH_METADATA))
+ port->dev_conf.txmode.offloads &=
+ ~DEV_TX_OFFLOAD_MATCH_METADATA;
if (numa_support) {
if (port_numa[pid] != NUMA_NO_CONFIG)
port_per_socket[port_numa[pid]]++;
else {
uint32_t socket_id = rte_eth_dev_socket_id(pid);
- /* if socket_id is invalid, set to 0 */
+ /*
+ * if socket_id is invalid,
+ * set to the first available socket.
+ */
if (check_socket_id(socket_id) < 0)
- socket_id = 0;
+ socket_id = socket_ids[0];
port_per_socket[socket_id]++;
}
}
@@ -772,6 +1114,7 @@ init_config(void)
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
+ port->tx_metadata = 0;
}
/*
@@ -911,9 +1254,12 @@ init_fwd_streams(void)
else {
port->socket_id = rte_eth_dev_socket_id(pid);
- /* if socket_id is invalid, set to 0 */
+ /*
+ * if socket_id is invalid,
+ * set to the first available socket.
+ */
if (check_socket_id(port->socket_id) < 0)
- port->socket_id = 0;
+ port->socket_id = socket_ids[0];
}
}
else {
@@ -1045,8 +1391,9 @@ fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
(uint64_t) (stats->ipackets + stats->imissed));
if (cur_fwd_eng == &csum_fwd_engine)
- printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
- port->rx_bad_ip_csum, port->rx_bad_l4_csum);
+ printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
+ port->rx_bad_ip_csum, port->rx_bad_l4_csum,
+ port->rx_bad_outer_l4_csum);
if ((stats->ierrors + stats->rx_nombuf) > 0) {
printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
@@ -1064,8 +1411,9 @@ fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
(uint64_t) (stats->ipackets + stats->imissed));
if (cur_fwd_eng == &csum_fwd_engine)
- printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
- port->rx_bad_ip_csum, port->rx_bad_l4_csum);
+ printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
+ port->rx_bad_ip_csum, port->rx_bad_l4_csum,
+ port->rx_bad_outer_l4_csum);
if ((stats->ierrors + stats->rx_nombuf) > 0) {
printf(" RX-error:%"PRIu64"\n", stats->ierrors);
printf(" RX-nombufs: %14"PRIu64"\n",
@@ -1129,7 +1477,9 @@ fwd_stream_stats_display(streamid_t stream_id)
/* if checksum mode */
if (cur_fwd_eng == &csum_fwd_engine) {
printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
- "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
+ "%-14u Rx- bad outer L4 checksum: %-14u\n",
+ fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
+ fs->rx_bad_outer_l4_csum);
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
@@ -1283,31 +1633,6 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
}
/*
- * Update the forward ports list.
- */
-void
-update_fwd_ports(portid_t new_pid)
-{
- unsigned int i;
- unsigned int new_nb_fwd_ports = 0;
- int move = 0;
-
- for (i = 0; i < nb_fwd_ports; ++i) {
- if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
- move = 1;
- else if (move)
- fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
- else
- new_nb_fwd_ports++;
- }
- if (new_pid < RTE_MAX_ETHPORTS)
- fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
-
- nb_fwd_ports = new_nb_fwd_ports;
- nb_cfg_ports = new_nb_fwd_ports;
-}
-
-/*
* Launch packet forwarding configuration.
*/
void
@@ -1383,6 +1708,7 @@ start_packet_forwarding(int with_tx_first)
fwd_streams[sm_id]->fwd_dropped = 0;
fwd_streams[sm_id]->rx_bad_ip_csum = 0;
fwd_streams[sm_id]->rx_bad_l4_csum = 0;
+ fwd_streams[sm_id]->rx_bad_outer_l4_csum = 0;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
@@ -1488,6 +1814,9 @@ stop_packet_forwarding(void)
ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
rx_bad_l4_csum;
+ ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
+ fwd_streams[sm_id]->rx_bad_outer_l4_csum;
+
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fwd_cycles = (uint64_t) (fwd_cycles +
fwd_streams[sm_id]->core_cycles);
@@ -1620,18 +1949,6 @@ port_is_started(portid_t port_id)
return 1;
}
-static int
-port_is_closed(portid_t port_id)
-{
- if (port_id_is_invalid(port_id, ENABLED_WARN))
- return 0;
-
- if (ports[port_id].port_status != RTE_PORT_CLOSED)
- return 0;
-
- return 1;
-}
-
int
start_port(portid_t pid)
{
@@ -1640,7 +1957,6 @@ start_port(portid_t pid)
queueid_t qi;
struct rte_port *port;
struct ether_addr mac_addr;
- enum rte_eth_event_type event_type;
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
@@ -1670,7 +1986,7 @@ start_port(portid_t pid)
return -1;
}
}
-
+ configure_rxtx_dump_callbacks(0);
printf("Configuring Port %d (socket %u)\n", pi,
port->socket_id);
/* configure port */
@@ -1769,7 +2085,7 @@ start_port(portid_t pid)
return -1;
}
}
-
+ configure_rxtx_dump_callbacks(verbose_level);
/* start port */
if (rte_eth_dev_start(pi) < 0) {
printf("Fail to start port %d\n", pi);
@@ -1796,20 +2112,6 @@ start_port(portid_t pid)
need_check_link_status = 1;
}
- for (event_type = RTE_ETH_EVENT_UNKNOWN;
- event_type < RTE_ETH_EVENT_MAX;
- event_type++) {
- diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
- event_type,
- eth_event_callback,
- NULL);
- if (diag) {
- printf("Failed to setup even callback for event %d\n",
- event_type);
- return -1;
- }
- }
-
if (need_check_link_status == 1 && !no_link_check)
check_all_ports_link_status(RTE_PORT_ALL);
else if (need_check_link_status == 0)
@@ -1868,6 +2170,28 @@ stop_port(portid_t pid)
printf("Done\n");
}
+static void
+remove_invalid_ports_in(portid_t *array, portid_t *total)
+{
+ portid_t i;
+ portid_t new_total = 0;
+
+ for (i = 0; i < *total; i++)
+ if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
+ array[new_total] = array[i];
+ new_total++;
+ }
+ *total = new_total;
+}
+
+static void
+remove_invalid_ports(void)
+{
+ remove_invalid_ports_in(ports_ids, &nb_ports);
+ remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
+ nb_cfg_ports = nb_fwd_ports;
+}
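
remove_invalid_ports_in() is a stable in-place filter: surviving entries slide left and *total shrinks, e.g. a list [0, 7, 2] with port 7 gone compacts to [0, 2] with total 2. The bare pattern, as a sketch:

    #include <stdint.h>

    /* keep only entries accepted by is_valid(); order is preserved */
    static void
    compact_ids(uint16_t *array, uint16_t *total, int (*is_valid)(uint16_t))
    {
        uint16_t i, new_total = 0;

        for (i = 0; i < *total; i++)
            if (is_valid(array[i]))
                array[new_total++] = array[i];
        *total = new_total;
    }
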
+
void
close_port(portid_t pid)
{
@@ -1910,6 +2234,8 @@ close_port(portid_t pid)
port_flow_flush(pi);
rte_eth_dev_close(pi);
+ remove_invalid_ports();
+
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
printf("Port %d cannot be set to closed\n", pi);
@@ -1959,44 +2285,11 @@ reset_port(portid_t pid)
printf("Done\n");
}
-static int
-eth_dev_event_callback_register(void)
-{
- int ret;
-
- /* register the device event callback */
- ret = rte_dev_event_callback_register(NULL,
- eth_dev_event_callback, NULL);
- if (ret) {
- printf("Failed to register device event callback\n");
- return -1;
- }
-
- return 0;
-}
-
-
-static int
-eth_dev_event_callback_unregister(void)
-{
- int ret;
-
- /* unregister the device event callback */
- ret = rte_dev_event_callback_unregister(NULL,
- eth_dev_event_callback, NULL);
- if (ret < 0) {
- printf("Failed to unregister device event callback\n");
- return -1;
- }
-
- return 0;
-}
-
void
attach_port(char *identifier)
{
- portid_t pi = 0;
- unsigned int socket_id;
+ portid_t pi;
+ struct rte_dev_iterator iterator;
printf("Attaching a new port...\n");
@@ -2005,61 +2298,97 @@ attach_port(char *identifier)
return;
}
- if (rte_eth_dev_attach(identifier, &pi))
+ if (rte_dev_probe(identifier) != 0) {
+ TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
return;
+ }
+
+ /* first attach mode: event */
+ if (setup_on_probe_event) {
+ /* new ports are detected on RTE_ETH_EVENT_NEW event */
+ for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
+ if (ports[pi].port_status == RTE_PORT_HANDLING &&
+ ports[pi].need_setup != 0)
+ setup_attached_port(pi);
+ return;
+ }
+
+ /* second attach mode: iterator */
+ RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
+ /* setup ports matching the devargs used for probing */
+ if (port_is_forwarding(pi))
+ continue; /* port was already attached before */
+ setup_attached_port(pi);
+ }
+}
+
+static void
+setup_attached_port(portid_t pi)
+{
+ unsigned int socket_id;
socket_id = (unsigned)rte_eth_dev_socket_id(pi);
- /* if socket_id is invalid, set to 0 */
+ /* if socket_id is invalid, set to the first available socket. */
if (check_socket_id(socket_id) < 0)
- socket_id = 0;
+ socket_id = socket_ids[0];
reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
- ports_ids[nb_ports] = pi;
- nb_ports = rte_eth_dev_count_avail();
-
+ ports_ids[nb_ports++] = pi;
+ fwd_ports_ids[nb_fwd_ports++] = pi;
+ nb_cfg_ports = nb_fwd_ports;
+ ports[pi].need_setup = 0;
ports[pi].port_status = RTE_PORT_STOPPED;
- update_fwd_ports(pi);
-
printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
printf("Done\n");
}
void
-detach_port(portid_t port_id)
+detach_port_device(portid_t port_id)
{
- char name[RTE_ETH_NAME_MAX_LEN];
- uint16_t i;
+ struct rte_device *dev;
+ portid_t sibling;
- printf("Detaching a port...\n");
+ printf("Removing a device...\n");
- if (!port_is_closed(port_id)) {
- printf("Please close port first\n");
+ dev = rte_eth_devices[port_id].device;
+ if (dev == NULL) {
+ printf("Device already removed\n");
return;
}
- if (ports[port_id].flow_list)
- port_flow_flush(port_id);
+ if (ports[port_id].port_status != RTE_PORT_CLOSED) {
+ if (ports[port_id].port_status != RTE_PORT_STOPPED) {
+ printf("Port not stopped\n");
+ return;
+ }
+ printf("Port was not closed\n");
+ if (ports[port_id].flow_list)
+ port_flow_flush(port_id);
+ }
- if (rte_eth_dev_detach(port_id, name)) {
- TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
+ if (rte_dev_remove(dev) != 0) {
+ TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
return;
}
- for (i = 0; i < nb_ports; i++) {
- if (ports_ids[i] == port_id) {
- ports_ids[i] = ports_ids[nb_ports-1];
- ports_ids[nb_ports-1] = 0;
- break;
+ for (sibling = 0; sibling < RTE_MAX_ETHPORTS; sibling++) {
+ if (rte_eth_devices[sibling].device != dev)
+ continue;
+ /* reset mapping between old ports and removed device */
+ rte_eth_devices[sibling].device = NULL;
+ if (ports[sibling].port_status != RTE_PORT_CLOSED) {
+ /* sibling ports are forced to be closed */
+ ports[sibling].port_status = RTE_PORT_CLOSED;
+ printf("Port %u is closed\n", sibling);
}
}
- nb_ports = rte_eth_dev_count_avail();
- update_fwd_ports(RTE_MAX_ETHPORTS);
+ remove_invalid_ports();
- printf("Port %u is detached. Now total ports is %d\n",
- port_id, nb_ports);
+ printf("Device of port %u is detached\n", port_id);
+ printf("Now total ports is %d\n", nb_ports);
printf("Done\n");
return;
}
@@ -2092,20 +2421,32 @@ pmd_test_exit(void)
*/
device = rte_eth_devices[pt_id].device;
if (device && !strcmp(device->driver->name, "net_virtio_user"))
- detach_port(pt_id);
+ detach_port_device(pt_id);
}
}
if (hot_plug) {
ret = rte_dev_event_monitor_stop();
- if (ret)
+ if (ret) {
RTE_LOG(ERR, EAL,
"fail to stop device event monitor.");
+ return;
+ }
- ret = eth_dev_event_callback_unregister();
- if (ret)
+ ret = rte_dev_event_callback_unregister(NULL,
+ eth_dev_event_callback, NULL);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "fail to unregister device event callback.\n");
+ return;
+ }
+
+ ret = rte_dev_hotplug_handle_disable();
+ if (ret) {
RTE_LOG(ERR, EAL,
- "fail to unregister all event callbacks.");
+ "fail to disable hotplug handling.\n");
+ return;
+ }
}
printf("\nBye...\n");
@@ -2192,7 +2533,7 @@ rmv_event_callback(void *arg)
stop_port(port_id);
no_link_check = org_no_link_check;
close_port(port_id);
- detach_port(port_id);
+ detach_port_device(port_id);
if (need_to_start)
start_packet_forwarding(0);
}
@@ -2202,38 +2543,27 @@ static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
{
- static const char * const event_desc[] = {
- [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
- [RTE_ETH_EVENT_INTR_LSC] = "LSC",
- [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
- [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
- [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
- [RTE_ETH_EVENT_IPSEC] = "IPsec",
- [RTE_ETH_EVENT_MACSEC] = "MACsec",
- [RTE_ETH_EVENT_INTR_RMV] = "device removal",
- [RTE_ETH_EVENT_NEW] = "device probed",
- [RTE_ETH_EVENT_DESTROY] = "device released",
- [RTE_ETH_EVENT_MAX] = NULL,
- };
-
RTE_SET_USED(param);
RTE_SET_USED(ret_param);
if (type >= RTE_ETH_EVENT_MAX) {
- fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
+ fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
port_id, __func__, type);
fflush(stderr);
} else if (event_print_mask & (UINT32_C(1) << type)) {
- printf("\nPort %" PRIu8 ": %s event\n", port_id,
- event_desc[type]);
+ printf("\nPort %" PRIu16 ": %s event\n", port_id,
+ eth_event_desc[type]);
fflush(stdout);
}
- if (port_id_is_invalid(port_id, DISABLED_WARN))
- return 0;
-
switch (type) {
+ case RTE_ETH_EVENT_NEW:
+ ports[port_id].need_setup = 1;
+ ports[port_id].port_status = RTE_PORT_HANDLING;
+ break;
case RTE_ETH_EVENT_INTR_RMV:
+ if (port_id_is_invalid(port_id, DISABLED_WARN))
+ break;
if (rte_eal_alarm_set(100000,
rmv_event_callback, (void *)(intptr_t)port_id))
fprintf(stderr, "Could not set up deferred device removal\n");
@@ -2244,11 +2574,36 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
return 0;
}
+static int
+register_eth_event_callback(void)
+{
+ int ret;
+ enum rte_eth_event_type event;
+
+ for (event = RTE_ETH_EVENT_UNKNOWN;
+ event < RTE_ETH_EVENT_MAX; event++) {
+ ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
+ event,
+ eth_event_callback,
+ NULL);
+ if (ret != 0) {
+ TESTPMD_LOG(ERR, "Failed to register callback for "
+ "%s event\n", eth_event_desc[event]);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/* This function is used by the interrupt thread */
static void
-eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
+eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
__rte_unused void *arg)
{
+ uint16_t port_id;
+ int ret;
+
if (type >= RTE_DEV_EVENT_MAX) {
fprintf(stderr, "%s called upon invalid event %d\n",
__func__, type);
@@ -2259,9 +2614,13 @@ eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
case RTE_DEV_EVENT_REMOVE:
RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
device_name);
- /* TODO: After finish failure handle, begin to stop
- * packet forward, stop port, close port, detach port.
- */
+ ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
+ device_name);
+ return;
+ }
+ rmv_event_callback((void *)(intptr_t)port_id);
break;
case RTE_DEV_EVENT_ADD:
RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
@@ -2650,6 +3009,11 @@ init_port(void)
"rte_zmalloc(%d struct rte_port) failed\n",
RTE_MAX_ETHPORTS);
}
+
+ /* Initialize ports NUMA structures */
+ memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
static void
@@ -2716,6 +3080,10 @@ main(int argc, char** argv)
rte_panic("Cannot register log type");
rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
+ ret = register_eth_event_callback();
+ if (ret != 0)
+ rte_panic("Cannot register for ethdev events");
+
#ifdef RTE_LIBRTE_PDUMP
/* initialize packet capture framework */
rte_pdump_init(NULL);
@@ -2784,14 +3152,27 @@ main(int argc, char** argv)
init_config();
if (hot_plug) {
- /* enable hot plug monitoring */
+ ret = rte_dev_hotplug_handle_enable();
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ "fail to enable hotplug handling.");
+ return -1;
+ }
+
ret = rte_dev_event_monitor_start();
if (ret) {
- rte_errno = EINVAL;
+ RTE_LOG(ERR, EAL,
+ "fail to start device event monitoring.");
return -1;
}
- eth_dev_event_callback_register();
+ ret = rte_dev_event_callback_register(NULL,
+ eth_dev_event_callback, NULL);
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ "fail to register device event callback\n");
+ return -1;
+ }
}
if (start_port(RTE_PORT_ALL) != 0)
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index a1f66147..3ff11e64 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -5,6 +5,8 @@
#ifndef _TESTPMD_H_
#define _TESTPMD_H_
+#include <stdbool.h>
+
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_gro.h>
@@ -69,6 +71,16 @@ enum {
PORT_TOPOLOGY_LOOP,
};
+enum {
+ MP_ALLOC_NATIVE, /**< allocate and populate mempool natively */
+ MP_ALLOC_ANON,
+ /**< allocate mempool natively, but populate using anonymous memory */
+ MP_ALLOC_XMEM,
+ /**< allocate and populate mempool using anonymous memory */
+ MP_ALLOC_XMEM_HUGE
+ /**< allocate and populate mempool using anonymous hugepage memory */
+};
+
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/**
* The data structure associated with RX and TX packet burst statistics
@@ -112,6 +124,8 @@ struct fwd_stream {
unsigned int fwd_dropped; /**< received packets not forwarded */
unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */
unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
+ unsigned int rx_bad_outer_l4_csum;
+ /**< received packets with bad outer l4 checksum */
unsigned int gro_times; /**< GRO operation times */
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t core_cycles; /**< used for RX and TX processing */
@@ -124,15 +138,12 @@ struct fwd_stream {
/** Descriptor for a single flow. */
struct port_flow {
- size_t size; /**< Allocated space including data[]. */
struct port_flow *next; /**< Next flow in list. */
struct port_flow *tmp; /**< Temporary linking. */
uint32_t id; /**< Flow rule ID. */
struct rte_flow *flow; /**< Opaque flow object returned by PMD. */
- struct rte_flow_attr attr; /**< Attributes. */
- struct rte_flow_item *pattern; /**< Pattern. */
- struct rte_flow_action *actions; /**< Actions. */
- uint8_t data[]; /**< Storage for pattern/actions. */
+ struct rte_flow_conv_rule rule; /**< Saved flow rule description. */
+ uint8_t data[]; /**< Storage for flow rule description. */
};
#ifdef SOFTNIC
@@ -165,9 +176,12 @@ struct rte_port {
void *fwd_ctx; /**< Forwarding mode context */
uint64_t rx_bad_ip_csum; /**< rx pkts with bad ip checksum */
uint64_t rx_bad_l4_csum; /**< rx pkts with bad l4 checksum */
+ uint64_t rx_bad_outer_l4_csum;
+ /**< rx pkts with bad outer l4 checksum */
uint8_t tx_queue_stats_mapping_enabled;
uint8_t rx_queue_stats_mapping_enabled;
volatile uint16_t port_status; /**< port started or not */
+ uint8_t need_setup; /**< port just attached */
uint8_t need_reconfig; /**< need reconfiguring port or not */
uint8_t need_reconfig_queues; /**< need reconfiguring queues or not */
uint8_t rss_flag; /**< enable rss or not */
@@ -180,9 +194,14 @@ struct rte_port {
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
uint8_t slave_flag; /**< bonding slave port */
struct port_flow *flow_list; /**< Associated flows. */
+ const struct rte_eth_rxtx_callback *rx_dump_cb[MAX_QUEUE_ID+1];
+ const struct rte_eth_rxtx_callback *tx_dump_cb[MAX_QUEUE_ID+1];
#ifdef SOFTNIC
struct softnic_port softport; /**< softnic params */
#endif
+ /** Metadata value to insert in Tx packets. */
+ rte_be32_t tx_metadata;
+ const struct rte_eth_rxtx_callback *tx_set_md_cb[MAX_QUEUE_ID+1];
};
/**
@@ -243,6 +262,7 @@ extern struct fwd_engine rx_only_engine;
extern struct fwd_engine tx_only_engine;
extern struct fwd_engine csum_fwd_engine;
extern struct fwd_engine icmp_echo_engine;
+extern struct fwd_engine noisy_vnf_engine;
#ifdef SOFTNIC
extern struct fwd_engine softnic_fwd_engine;
#endif
@@ -304,13 +324,15 @@ extern uint8_t numa_support; /**< set by "--numa" parameter */
extern uint16_t port_topology; /**< set by "--port-topology" parameter */
extern uint8_t no_flush_rx; /**<set by "--no-flush-rx" parameter */
extern uint8_t flow_isolate_all; /**< set by "--flow-isolate-all */
-extern uint8_t mp_anon; /**< set by "--mp-anon" parameter */
+extern uint8_t mp_alloc_type;
+/**< set by "--mp-anon" or "--mp-alloc" parameter */
extern uint8_t no_link_check; /**<set by "--disable-link-check" parameter */
extern volatile int test_done; /* stop packet forwarding when set to 1. */
extern uint8_t lsc_interrupt; /**< disabled by "--no-lsc-interrupt" parameter */
extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */
extern uint32_t event_print_mask;
/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */
+extern bool setup_on_probe_event; /**< disabled by port setup-on iterator */
extern uint8_t hot_plug; /**< enable by "--hot-plug" parameter */
extern int do_mlockall; /**< set by "--mlockall" or "--no-mlockall" parameter */
@@ -375,6 +397,13 @@ extern int8_t rx_drop_en;
extern int16_t tx_free_thresh;
extern int16_t tx_rs_thresh;
+extern uint16_t noisy_tx_sw_bufsz;
+extern uint16_t noisy_tx_sw_buf_flush_time;
+extern uint64_t noisy_lkup_mem_sz;
+extern uint64_t noisy_lkup_num_writes;
+extern uint64_t noisy_lkup_num_reads;
+extern uint64_t noisy_lkup_num_reads_writes;
+
extern uint8_t dcb_config;
extern uint8_t dcb_test;
@@ -487,6 +516,68 @@ struct nvgre_encap_conf {
};
struct nvgre_encap_conf nvgre_encap_conf;
+/* L2 encap parameters. */
+struct l2_encap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+ rte_be16_t vlan_tci;
+ uint8_t eth_src[ETHER_ADDR_LEN];
+ uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct l2_encap_conf l2_encap_conf;
+
+/* L2 decap parameters. */
+struct l2_decap_conf {
+ uint32_t select_vlan:1;
+};
+struct l2_decap_conf l2_decap_conf;
+
+/* MPLSoGRE encap parameters. */
+struct mplsogre_encap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+ uint8_t label[3];
+ rte_be32_t ipv4_src;
+ rte_be32_t ipv4_dst;
+ uint8_t ipv6_src[16];
+ uint8_t ipv6_dst[16];
+ rte_be16_t vlan_tci;
+ uint8_t eth_src[ETHER_ADDR_LEN];
+ uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct mplsogre_encap_conf mplsogre_encap_conf;
+
+/* MPLSoGRE decap parameters. */
+struct mplsogre_decap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+};
+struct mplsogre_decap_conf mplsogre_decap_conf;
+
+/* MPLSoUDP encap parameters. */
+struct mplsoudp_encap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+ uint8_t label[3];
+ rte_be16_t udp_src;
+ rte_be16_t udp_dst;
+ rte_be32_t ipv4_src;
+ rte_be32_t ipv4_dst;
+ uint8_t ipv6_src[16];
+ uint8_t ipv6_dst[16];
+ rte_be16_t vlan_tci;
+ uint8_t eth_src[ETHER_ADDR_LEN];
+ uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct mplsoudp_encap_conf mplsoudp_encap_conf;
+
+/* MPLSoUDP decap parameters. */
+struct mplsoudp_decap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+};
+struct mplsoudp_decap_conf mplsoudp_decap_conf;
+
static inline unsigned int
lcore_num(void)
{
@@ -594,6 +685,8 @@ void nic_xstats_display(portid_t port_id);
void nic_xstats_clear(portid_t port_id);
void nic_stats_mapping_display(portid_t port_id);
void port_infos_display(portid_t port_id);
+void port_summary_display(portid_t port_id);
+void port_summary_header_display(void);
void port_offload_cap_display(portid_t port_id);
void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
@@ -688,7 +781,7 @@ void stop_port(portid_t pid);
void close_port(portid_t pid);
void reset_port(portid_t pid);
void attach_port(char *identifier);
-void detach_port(portid_t port_id);
+void detach_port_device(portid_t port_id);
int all_ports_stopped(void);
int port_is_stopped(portid_t port_id);
int port_is_started(portid_t port_id);
@@ -708,8 +801,7 @@ int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate);
int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate,
uint64_t q_msk);
-void port_rss_hash_conf_show(portid_t port_id, char rss_info[],
- int show_rss_key);
+void port_rss_hash_conf_show(portid_t port_id, int show_rss_key);
void port_rss_hash_key_update(portid_t port_id, char rss_type[],
uint8_t *hash_key, uint hash_key_len);
int rx_queue_id_is_invalid(queueid_t rxq_id);
@@ -743,6 +835,25 @@ int check_nb_rxq(queueid_t rxq);
queueid_t get_allowed_max_nb_txq(portid_t *pid);
int check_nb_txq(queueid_t txq);
+uint16_t dump_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
+ uint16_t nb_pkts, __rte_unused uint16_t max_pkts,
+ __rte_unused void *user_param);
+
+uint16_t dump_tx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
+ uint16_t nb_pkts, __rte_unused void *user_param);
+
+void add_rx_dump_callbacks(portid_t portid);
+void remove_rx_dump_callbacks(portid_t portid);
+void add_tx_dump_callbacks(portid_t portid);
+void remove_tx_dump_callbacks(portid_t portid);
+void configure_rxtx_dump_callbacks(uint16_t verbose);
+
+uint16_t tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ __rte_unused void *user_param);
+void add_tx_md_callback(portid_t portid);
+void remove_tx_md_callback(portid_t portid);
+
/*
* Work-around of a compilation error with ICC on invocations of the
* rte_be_to_cpu_16() function.
diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
new file mode 100644
index 00000000..687bfa49
--- /dev/null
+++ b/app/test-pmd/util.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2018 Mellanox Technology
+ */
+
+#include <stdio.h>
+
+#include <rte_net.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+
+#include "testpmd.h"
+
+static inline void
+print_ether_addr(const char *what, struct ether_addr *eth_addr)
+{
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ printf("%s%s", what, buf);
+}
+
+static inline void
+dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
+ uint16_t nb_pkts, int is_rx)
+{
+ struct rte_mbuf *mb;
+ struct ether_hdr *eth_hdr;
+ uint16_t eth_type;
+ uint64_t ol_flags;
+ uint16_t i, packet_type;
+ uint16_t is_encapsulation;
+ char buf[256];
+ struct rte_net_hdr_lens hdr_lens;
+ uint32_t sw_packet_type;
+ uint16_t udp_port;
+ uint32_t vx_vni;
+
+ if (!nb_pkts)
+ return;
+ printf("port %u/queue %u: %s %u packets\n",
+ port_id, queue,
+ is_rx ? "received" : "sent",
+ (unsigned int) nb_pkts);
+ for (i = 0; i < nb_pkts; i++) {
+ mb = pkts[i];
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+ eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
+ ol_flags = mb->ol_flags;
+ packet_type = mb->packet_type;
+ is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type);
+
+ print_ether_addr(" src=", &eth_hdr->s_addr);
+ print_ether_addr(" - dst=", &eth_hdr->d_addr);
+ printf(" - type=0x%04x - length=%u - nb_segs=%d",
+ eth_type, (unsigned int) mb->pkt_len,
+ (int)mb->nb_segs);
+ if (ol_flags & PKT_RX_RSS_HASH) {
+ printf(" - RSS hash=0x%x", (unsigned int) mb->hash.rss);
+ printf(" - RSS queue=0x%x", (unsigned int) queue);
+ }
+ if (ol_flags & PKT_RX_FDIR) {
+ printf(" - FDIR matched ");
+ if (ol_flags & PKT_RX_FDIR_ID)
+ printf("ID=0x%x",
+ mb->hash.fdir.hi);
+ else if (ol_flags & PKT_RX_FDIR_FLX)
+ printf("flex bytes=0x%08x %08x",
+ mb->hash.fdir.hi, mb->hash.fdir.lo);
+ else
+ printf("hash=0x%x ID=0x%x ",
+ mb->hash.fdir.hash, mb->hash.fdir.id);
+ }
+ if (ol_flags & PKT_RX_TIMESTAMP)
+ printf(" - timestamp %"PRIu64" ", mb->timestamp);
+ if (ol_flags & PKT_RX_QINQ)
+ printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
+ mb->vlan_tci, mb->vlan_tci_outer);
+ else if (ol_flags & PKT_RX_VLAN)
+ printf(" - VLAN tci=0x%x", mb->vlan_tci);
+ if (mb->packet_type) {
+ rte_get_ptype_name(mb->packet_type, buf, sizeof(buf));
+ printf(" - hw ptype: %s", buf);
+ }
+ sw_packet_type = rte_net_get_ptype(mb, &hdr_lens,
+ RTE_PTYPE_ALL_MASK);
+ rte_get_ptype_name(sw_packet_type, buf, sizeof(buf));
+ printf(" - sw ptype: %s", buf);
+ if (sw_packet_type & RTE_PTYPE_L2_MASK)
+ printf(" - l2_len=%d", hdr_lens.l2_len);
+ if (sw_packet_type & RTE_PTYPE_L3_MASK)
+ printf(" - l3_len=%d", hdr_lens.l3_len);
+ if (sw_packet_type & RTE_PTYPE_L4_MASK)
+ printf(" - l4_len=%d", hdr_lens.l4_len);
+ if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK)
+ printf(" - tunnel_len=%d", hdr_lens.tunnel_len);
+ if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK)
+ printf(" - inner_l2_len=%d", hdr_lens.inner_l2_len);
+ if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK)
+ printf(" - inner_l3_len=%d", hdr_lens.inner_l3_len);
+ if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK)
+ printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len);
+ if (is_encapsulation) {
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct udp_hdr *udp_hdr;
+ uint8_t l2_len;
+ uint8_t l3_len;
+ uint8_t l4_len;
+ uint8_t l4_proto;
+ struct vxlan_hdr *vxlan_hdr;
+
+ l2_len = sizeof(struct ether_hdr);
+
+ /* Do not support ipv4 option field */
+ if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
+ l3_len = sizeof(struct ipv4_hdr);
+ ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct ipv4_hdr *,
+ l2_len);
+ l4_proto = ipv4_hdr->next_proto_id;
+ } else {
+ l3_len = sizeof(struct ipv6_hdr);
+ ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct ipv6_hdr *,
+ l2_len);
+ l4_proto = ipv6_hdr->proto;
+ }
+ if (l4_proto == IPPROTO_UDP) {
+ udp_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct udp_hdr *,
+ l2_len + l3_len);
+ l4_len = sizeof(struct udp_hdr);
+ vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct vxlan_hdr *,
+ l2_len + l3_len + l4_len);
+ udp_port = RTE_BE_TO_CPU_16(udp_hdr->dst_port);
+ vx_vni = rte_be_to_cpu_32(vxlan_hdr->vx_vni);
+ printf(" - VXLAN packet: packet type =%d, "
+ "Destination UDP port =%d, VNI = %d",
+ packet_type, udp_port, vx_vni >> 8);
+ }
+ }
+ printf(" - %s queue=0x%x", is_rx ? "Receive" : "Send",
+ (unsigned int) queue);
+ printf("\n");
+ rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf));
+ printf(" ol_flags: %s\n", buf);
+ }
+}
+
+uint16_t
+dump_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
+ uint16_t nb_pkts, __rte_unused uint16_t max_pkts,
+ __rte_unused void *user_param)
+{
+ dump_pkt_burst(port_id, queue, pkts, nb_pkts, 1);
+ return nb_pkts;
+}
+
+uint16_t
+dump_tx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
+ uint16_t nb_pkts, __rte_unused void *user_param)
+{
+ dump_pkt_burst(port_id, queue, pkts, nb_pkts, 0);
+ return nb_pkts;
+}
+
+uint16_t
+tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ __rte_unused void *user_param)
+{
+ uint16_t i = 0;
+
+ /*
+ * Add metadata value to every Tx packet,
+ * and set ol_flags accordingly.
+ */
+ for (i = 0; i < nb_pkts; i++) {
+ pkts[i]->tx_metadata = ports[port_id].tx_metadata;
+ pkts[i]->ol_flags |= PKT_TX_METADATA;
+ }
+ return nb_pkts;
+}
+
+void
+add_tx_md_callback(portid_t portid)
+{
+ struct rte_eth_dev_info dev_info;
+ uint16_t queue;
+
+ if (port_id_is_invalid(portid, ENABLED_WARN))
+ return;
+ rte_eth_dev_info_get(portid, &dev_info);
+ for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+ if (!ports[portid].tx_set_md_cb[queue])
+ ports[portid].tx_set_md_cb[queue] =
+ rte_eth_add_tx_callback(portid, queue,
+ tx_pkt_set_md, NULL);
+}
+
+void
+remove_tx_md_callback(portid_t portid)
+{
+ struct rte_eth_dev_info dev_info;
+ uint16_t queue;
+
+ if (port_id_is_invalid(portid, ENABLED_WARN))
+ return;
+ rte_eth_dev_info_get(portid, &dev_info);
+ for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+ if (ports[portid].tx_set_md_cb[queue]) {
+ rte_eth_remove_tx_callback(portid, queue,
+ ports[portid].tx_set_md_cb[queue]);
+ ports[portid].tx_set_md_cb[queue] = NULL;
+ }
+}
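
These dump helpers are meant to be attached as ethdev RX/TX burst callbacks; within the patch that is driven by configure_rxtx_dump_callbacks() and the add_*/remove_* helpers declared in testpmd.h. A minimal sketch of wiring dump_rx_pkts onto a single queue by hand (port and queue ids hypothetical):

    const struct rte_eth_rxtx_callback *cb;

    /* dump every burst received on port 0, queue 0 */
    cb = rte_eth_add_rx_callback(0, 0, dump_rx_pkts, NULL);
    if (cb == NULL)
        printf("cannot add RX dump callback\n");

    /* ... and detach it again when verbosity is turned off */
    rte_eth_remove_rx_callback(0, 0, cb);
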