summary refs log tree commit diff stats
path: root/app/test-eventdev/test_perf_common.c
diff options
context:
space:
mode:
authorLuca Boccassi <luca.boccassi@gmail.com>2018-08-14 18:52:30 +0100
committerLuca Boccassi <luca.boccassi@gmail.com>2018-08-14 18:53:17 +0100
commitb63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree83114aac64286fe616506c0b3dfaec2ab86ef835 /app/test-eventdev/test_perf_common.c
parentca33590b6af032bff57d9cc70455660466a654b2 (diff)
New upstream version 18.08upstream/18.08
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'app/test-eventdev/test_perf_common.c')
-rw-r--r-- app/test-eventdev/test_perf_common.c | 276
1 file changed, 250 insertions(+), 26 deletions(-)
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 59fa0a49..d33cb2cd 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -72,6 +72,128 @@ perf_producer(void *arg)
return 0;
}
+/*
+ * Producer loop for the event timer adapter mode (single-arm variant).
+ * Pulls one perf_elt per timer from the mempool, arms it on one of the
+ * configured timer adapters (spread round-robin via the flow counter)
+ * and accumulates the arm latency in timer cycles.  Runs until
+ * nb_timers events were produced or the test is flagged done.
+ */
+static inline int
+perf_event_timer_producer(void *arg)
+{
+	struct prod_data *p = arg;
+	struct test_perf *t = p->t;
+	struct evt_options *opt = t->opt;
+	uint32_t flow_counter = 0;
+	uint64_t count = 0;
+	uint64_t arm_latency = 0;
+	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+	const uint32_t nb_flows = t->nb_flows;
+	const uint64_t nb_timers = opt->nb_timers;
+	struct rte_mempool *pool = t->pool;
+	struct perf_elt *m;
+	struct rte_event_timer_adapter **adptr = t->timer_adptr;
+	struct rte_event_timer tim;
+	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+	memset(&tim, 0, sizeof(struct rte_event_timer));
+	/* Rescale the expiry to the tick resolution the adapter actually
+	 * granted (optm_timer_tick_nsec), when one was reported. */
+	timeout_ticks = opt->optm_timer_tick_nsec ?
+			(timeout_ticks * opt->timer_tick_nsec)
+			/ opt->optm_timer_tick_nsec : timeout_ticks;
+	timeout_ticks += timeout_ticks ? 0 : 1; /* never arm with 0 ticks */
+	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+	tim.ev.op = RTE_EVENT_OP_NEW;
+	tim.ev.sched_type = t->opt->sched_type_list[0];
+	tim.ev.queue_id = p->queue_id;
+	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+	tim.timeout_ticks = timeout_ticks;
+
+	if (opt->verbose_level > 1)
+		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+	while (count < nb_timers && t->done == false) {
+		if (rte_mempool_get(pool, (void **)&m) < 0)
+			continue; /* pool exhausted; retry until events drain */
+
+		m->tim = tim;
+		m->tim.ev.flow_id = flow_counter++ % nb_flows;
+		m->tim.ev.event_ptr = m;
+		m->timestamp = rte_get_timer_cycles();
+		/* Spin until the timer is armed, restarting the latency
+		 * timestamp on every retry. */
+		while (rte_event_timer_arm_burst(
+				adptr[flow_counter % nb_timer_adptrs],
+				(struct rte_event_timer **)&m, 1) != 1) {
+			if (t->done)
+				break;
+			rte_pause();
+			m->timestamp = rte_get_timer_cycles();
+		}
+		arm_latency += rte_get_timer_cycles() - m->timestamp;
+		count++;
+	}
+	fflush(stdout);
+	rte_delay_ms(1000);
+	/* Fix: guard count == 0 (test aborted before the first arm) to
+	 * avoid dividing by zero, and divide in floating point so the
+	 * average keeps fractional cycles instead of truncating
+	 * arm_latency / count before the cast. */
+	if (count)
+		printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+				__func__, rte_lcore_id(),
+				(float)arm_latency / count /
+				(rte_get_timer_hz() / 1000000));
+	return 0;
+}
+
+/*
+ * Burst variant of the event timer producer: arms BURST_SIZE timers at
+ * a time, all with the same timeout tick value, via
+ * rte_event_timer_arm_tmo_tick_burst().
+ */
+static inline int
+perf_event_timer_producer_burst(void *arg)
+{
+	int i;
+	struct prod_data *p = arg;
+	struct test_perf *t = p->t;
+	struct evt_options *opt = t->opt;
+	uint32_t flow_counter = 0;
+	uint64_t count = 0;
+	uint64_t arm_latency = 0;
+	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+	const uint32_t nb_flows = t->nb_flows;
+	const uint64_t nb_timers = opt->nb_timers;
+	struct rte_mempool *pool = t->pool;
+	/* One extra NULL slot so rte_prefetch0(m[i + 1]) on the last
+	 * iteration stays inside the array (prefetching NULL is benign). */
+	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+	struct rte_event_timer_adapter **adptr = t->timer_adptr;
+	struct rte_event_timer tim;
+	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+	memset(&tim, 0, sizeof(struct rte_event_timer));
+	/* Rescale the expiry to the tick resolution the adapter actually
+	 * granted (optm_timer_tick_nsec), when one was reported. */
+	timeout_ticks = opt->optm_timer_tick_nsec ?
+			(timeout_ticks * opt->timer_tick_nsec)
+			/ opt->optm_timer_tick_nsec : timeout_ticks;
+	timeout_ticks += timeout_ticks ? 0 : 1; /* never arm with 0 ticks */
+	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+	tim.ev.op = RTE_EVENT_OP_NEW;
+	tim.ev.sched_type = t->opt->sched_type_list[0];
+	tim.ev.queue_id = p->queue_id;
+	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+	tim.timeout_ticks = timeout_ticks;
+
+	if (opt->verbose_level > 1)
+		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+	while (count < nb_timers && t->done == false) {
+		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+			continue; /* pool exhausted; retry until events drain */
+		for (i = 0; i < BURST_SIZE; i++) {
+			rte_prefetch0(m[i + 1]);
+			m[i]->tim = tim;
+			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+			m[i]->tim.ev.event_ptr = m[i];
+			m[i]->timestamp = rte_get_timer_cycles();
+		}
+		/* NOTE(review): the number of timers actually armed is not
+		 * checked; a partial arm overstates count and latency.
+		 * Kept as-is to preserve the measured behaviour. */
+		rte_event_timer_arm_tmo_tick_burst(
+				adptr[flow_counter % nb_timer_adptrs],
+				(struct rte_event_timer **)m,
+				tim.timeout_ticks,
+				BURST_SIZE);
+		/* i == BURST_SIZE here, so m[i - 1] is the last element
+		 * of the burst just armed. */
+		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
+		count += BURST_SIZE;
+	}
+	fflush(stdout);
+	rte_delay_ms(1000);
+	/* Fix: guard count == 0 to avoid division by zero, and divide in
+	 * floating point so the average keeps fractional cycles instead
+	 * of truncating arm_latency / count before the cast. */
+	if (count)
+		printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+				__func__, rte_lcore_id(),
+				(float)arm_latency / count /
+				(rte_get_timer_hz() / 1000000));
+	return 0;
+}
+
static int
perf_producer_wrapper(void *arg)
{
@@ -80,6 +202,12 @@ perf_producer_wrapper(void *arg)
/* Launch the producer function only in case of synthetic producer. */
if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
return perf_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ !t->opt->timdev_use_burst)
+ return perf_event_timer_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ t->opt->timdev_use_burst)
+ return perf_event_timer_producer_burst(arg);
return 0;
}
@@ -146,8 +274,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
port_idx++;
}
- const uint64_t total_pkts = opt->nb_pkts *
- evt_nr_active_lcores(opt->plcores);
+ const uint64_t total_pkts = t->outstand_pkts;
uint64_t dead_lock_cycles = rte_get_timer_cycles();
int64_t dead_lock_remaining = total_pkts;
@@ -189,7 +316,9 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
- if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
t->done = true;
rte_smp_wmb();
break;
@@ -226,7 +355,7 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
memset(&queue_conf, 0,
sizeof(struct rte_event_eth_rx_adapter_queue_conf));
queue_conf.ev.sched_type = opt->sched_type_list[0];
- for (prod = 0; prod < rte_eth_dev_count(); prod++) {
+ RTE_ETH_FOREACH_DEV(prod) {
uint32_t cap;
ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
@@ -283,6 +412,65 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
return ret;
}
+/*
+ * Create, configure and start opt->nb_timer_adptrs event timer
+ * adapters, storing the handles in t->timer_adptr[].  Returns 0 on
+ * success, rte_errno / a negative error code on failure.
+ */
+static int
+perf_event_timer_adapter_setup(struct test_perf *t)
+{
+ int i;
+ int ret;
+ struct rte_event_timer_adapter_info adapter_info;
+ struct rte_event_timer_adapter *wl;
+ uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
+ uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
+
+ /* Single producer lcore: enable the single-producer put fast path. */
+ if (nb_producers == 1)
+ flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
+
+ for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
+ struct rte_event_timer_adapter_conf config = {
+ .event_dev_id = t->opt->dev_id,
+ .timer_adapter_id = i,
+ .timer_tick_ns = t->opt->timer_tick_nsec,
+ .max_tmo_ns = t->opt->max_tmo_nsec,
+ .nb_timers = 2 * 1024 * 1024,
+ .flags = flags,
+ };
+
+ wl = rte_event_timer_adapter_create(&config);
+ if (wl == NULL) {
+ evt_err("failed to create event timer ring %d", i);
+ return rte_errno;
+ }
+
+ /* Record the resolution the adapter actually granted (the
+ * ADJUST_RES flag lets it differ from the requested tick);
+ * producers rescale their timeouts against this value. */
+ memset(&adapter_info, 0,
+ sizeof(struct rte_event_timer_adapter_info));
+ rte_event_timer_adapter_get_info(wl, &adapter_info);
+ t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;
+
+ /* Adapters without an internal port are service-driven: map
+ * the service to a service core and enable its run state. */
+ if (!(adapter_info.caps &
+ RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id;
+
+ rte_event_timer_adapter_service_id_get(wl,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for timer adapter\n");
+ return ret;
+ }
+ rte_service_runstate_set(service_id, 1);
+ }
+
+ ret = rte_event_timer_adapter_start(wl);
+ if (ret) {
+ evt_err("failed to Start event timer adapter %d", i);
+ return ret;
+ }
+ t->timer_adptr[i] = wl;
+ }
+ return 0;
+}
+
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t stride, uint8_t nb_queues,
@@ -326,6 +514,18 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
if (ret)
return ret;
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+ p->queue_id = prod * stride;
+ p->t = t;
+ prod++;
+ }
+
+ ret = perf_event_timer_adapter_setup(t);
+ if (ret)
+ return ret;
} else {
prod = 0;
for ( ; port < perf_nb_event_ports(opt); port++) {
@@ -415,10 +615,13 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
}
/* Fixups */
- if (opt->nb_stages == 1 && opt->fwd_latency) {
+ if ((opt->nb_stages == 1 &&
+ opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
+ opt->fwd_latency) {
evt_info("fwd_latency is valid when nb_stages > 1, disabling");
opt->fwd_latency = 0;
}
+
if (opt->fwd_latency && !opt->q_priority) {
evt_info("enabled queue priority for latency measurement");
opt->q_priority = 1;
@@ -447,8 +650,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
- RTE_SET_USED(test);
+ int i;
+ struct test_perf *t = evt_test_priv(test);
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ for (i = 0; i < opt->nb_timer_adptrs; i++)
+ rte_event_timer_adapter_stop(t->timer_adptr[i]);
+ }
rte_event_dev_stop(opt->dev_id);
rte_event_dev_close(opt->dev_id);
}
@@ -465,20 +673,14 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
struct test_perf *t = evt_test_priv(test);
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0,
- .hw_ip_checksum = 0,
- .hw_vlan_filter = 0,
- .hw_vlan_strip = 0,
- .hw_vlan_extend = 0,
- .jumbo_frame = 0,
- .hw_strip_crc = 1,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
@@ -488,19 +690,33 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
},
};
- if (opt->prod_type == EVT_PROD_TYPE_SYNT)
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
return 0;
- if (!rte_eth_dev_count()) {
+ if (!rte_eth_dev_count_avail()) {
evt_err("No ethernet ports found.");
return -ENODEV;
}
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ rte_eth_dev_info_get(i, &dev_info);
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ evt_info("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
- if (rte_eth_dev_configure(i, 1, 1,
- &port_conf)
- < 0) {
+ if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
evt_err("Failed to configure eth port [%d]", i);
return -EINVAL;
}
@@ -527,14 +743,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
RTE_SET_USED(test);
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
rte_eth_dev_stop(i);
- rte_eth_dev_close(i);
}
}
}
@@ -544,7 +759,8 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
struct test_perf *t = evt_test_priv(test);
- if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
t->pool = rte_mempool_create(test->name, /* mempool name */
opt->pool_sz, /* number of elements*/
sizeof(struct perf_elt), /* element size*/
@@ -594,10 +810,18 @@ perf_test_setup(struct evt_test *test, struct evt_options *opt)
struct test_perf *t = evt_test_priv(test);
- t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ t->outstand_pkts = opt->nb_timers *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_timers;
+ } else {
+ t->outstand_pkts = opt->nb_pkts *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_pkts;
+ }
+
t->nb_workers = evt_nr_active_lcores(opt->wlcores);
t->done = false;
- t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
t->result = EVT_TEST_FAILED;
t->opt = opt;