Diffstat (limited to 'app/test-eventdev')
-rw-r--r--  app/test-eventdev/evt_main.c             |  42
-rw-r--r--  app/test-eventdev/evt_options.c          | 132
-rw-r--r--  app/test-eventdev/evt_options.h          |  35
-rw-r--r--  app/test-eventdev/evt_test.h             |   3
-rw-r--r--  app/test-eventdev/meson.build            |  18
-rw-r--r--  app/test-eventdev/test_order_atq.c       |  12
-rw-r--r--  app/test-eventdev/test_order_queue.c     |  12
-rw-r--r--  app/test-eventdev/test_perf_atq.c        |  12
-rw-r--r--  app/test-eventdev/test_perf_common.c     | 276
-rw-r--r--  app/test-eventdev/test_perf_common.h     |  14
-rw-r--r--  app/test-eventdev/test_perf_queue.c      |   9
-rw-r--r--  app/test-eventdev/test_pipeline_atq.c    |   4
-rw-r--r--  app/test-eventdev/test_pipeline_common.c |  33
-rw-r--r--  app/test-eventdev/test_pipeline_queue.c  |   6
14 files changed, 494 insertions, 114 deletions
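
Most of the additions in the patch below wire an event timer adapter into the perf tests as a new producer type (--prod_type_timerdev / --prod_type_timerdev_burst), together with new knobs for the number of timers and adapters, the tick, the maximum timeout and the expiry time. As a condensed sketch of the flow the new producer code follows (create and start an adapter, then arm timers whose events point back at a caller-owned element), consider the fragment below; the helper name, the hard-coded adapter id 0, the ATOMIC sched type and the error handling are illustrative only and not taken verbatim from the patch.

#include <string.h>
#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_event_timer_adapter.h>

/* Illustrative helper (not from the patch): create and start one timer
 * adapter on event device 'dev_id', then arm a caller-provided timer so
 * that it expires roughly 'expiry_nsec' later on event queue 0. In the
 * real test the rte_event_timer lives inside a mempool element
 * (struct perf_elt), not on the caller's stack. */
static int
arm_one_timer_sketch(uint8_t dev_id, uint64_t tick_nsec, uint64_t max_tmo_nsec,
		     uint64_t expiry_nsec, struct rte_event_timer *tim)
{
	const struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = dev_id,
		.timer_adapter_id = 0,
		.timer_tick_ns = tick_nsec,
		.max_tmo_ns = max_tmo_nsec,
		.nb_timers = 2 * 1024 * 1024,
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
	};
	struct rte_event_timer_adapter *adptr;

	adptr = rte_event_timer_adapter_create(&conf);
	if (adptr == NULL || rte_event_timer_adapter_start(adptr) < 0)
		return -1;

	memset(tim, 0, sizeof(*tim));
	tim->ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim->ev.op = RTE_EVENT_OP_NEW;
	tim->ev.queue_id = 0;
	tim->ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	tim->ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim->ev.event_ptr = tim;
	tim->state = RTE_EVENT_TIMER_NOT_ARMED;
	/* Timeouts are programmed in ticks of timer_tick_ns; never zero. */
	tim->timeout_ticks = RTE_MAX(expiry_nsec / tick_nsec, (uint64_t)1);

	return rte_event_timer_arm_burst(adptr, &tim, 1) == 1 ? 0 : -1;
}
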
diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c index 57bb9457..a8d304ba 100644 --- a/app/test-eventdev/evt_main.c +++ b/app/test-eventdev/evt_main.c @@ -20,29 +20,41 @@ struct evt_test *test; static void signal_handler(int signum) { - if (signum == SIGINT || signum == SIGTERM) { + int i; + static uint8_t once; + + if ((signum == SIGINT || signum == SIGTERM) && !once) { + once = true; printf("\nSignal %d received, preparing to exit...\n", signum); - /* request all lcores to exit from the main loop */ - *(int *)test->test_priv = true; - rte_wmb(); - rte_eal_mp_wait_lcore(); + if (test != NULL) { + /* request all lcores to exit from the main loop */ + *(int *)test->test_priv = true; + rte_wmb(); + + if (test->ops.ethdev_destroy) + test->ops.ethdev_destroy(test, &opt); - if (test->ops.test_result) - test->ops.test_result(test, &opt); + rte_eal_mp_wait_lcore(); - if (test->ops.eventdev_destroy) - test->ops.eventdev_destroy(test, &opt); + if (test->ops.test_result) + test->ops.test_result(test, &opt); - if (test->ops.ethdev_destroy) - test->ops.ethdev_destroy(test, &opt); + if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { + RTE_ETH_FOREACH_DEV(i) + rte_eth_dev_close(i); + } - if (test->ops.mempool_destroy) - test->ops.mempool_destroy(test, &opt); + if (test->ops.eventdev_destroy) + test->ops.eventdev_destroy(test, &opt); - if (test->ops.test_destroy) - test->ops.test_destroy(test, &opt); + if (test->ops.mempool_destroy) + test->ops.mempool_destroy(test, &opt); + + if (test->ops.test_destroy) + test->ops.test_destroy(test, &opt); + } /* exit with the expected status */ signal(signum, SIG_DFL); diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c index 9683b222..cfa43a16 100644 --- a/app/test-eventdev/evt_options.c +++ b/app/test-eventdev/evt_options.c @@ -27,6 +27,11 @@ evt_options_default(struct evt_options *opt) opt->pool_sz = 16 * 1024; opt->wkr_deq_dep = 16; opt->nb_pkts = (1ULL << 26); /* do ~64M packets */ + opt->nb_timers = 1E8; + opt->nb_timer_adptrs = 1; + opt->timer_tick_nsec = 1E3; /* 1000ns ~ 1us */ + opt->max_tmo_nsec = 1E5; /* 100000ns ~100us */ + opt->expiry_nsec = 1E4; /* 10000ns ~10us */ opt->prod_type = EVT_PROD_TYPE_SYNT; } @@ -87,6 +92,22 @@ evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused) } static int +evt_parse_timer_prod_type(struct evt_options *opt, const char *arg __rte_unused) +{ + opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR; + return 0; +} + +static int +evt_parse_timer_prod_type_burst(struct evt_options *opt, + const char *arg __rte_unused) +{ + opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR; + opt->timdev_use_burst = 1; + return 0; +} + +static int evt_parse_test_name(struct evt_options *opt, const char *arg) { snprintf(opt->test_name, EVT_TEST_NAME_MAX_LEN, "%s", arg); @@ -120,6 +141,56 @@ evt_parse_nb_pkts(struct evt_options *opt, const char *arg) } static int +evt_parse_nb_timers(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint64(&(opt->nb_timers), arg); + + return ret; +} + +static int +evt_parse_timer_tick_nsec(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint64(&(opt->timer_tick_nsec), arg); + + return ret; +} + +static int +evt_parse_max_tmo_nsec(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint64(&(opt->max_tmo_nsec), arg); + + return ret; +} + +static int +evt_parse_expiry_nsec(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = 
parser_read_uint64(&(opt->expiry_nsec), arg); + + return ret; +} + +static int +evt_parse_nb_timer_adptrs(struct evt_options *opt, const char *arg) +{ + int ret; + + ret = parser_read_uint8(&(opt->nb_timer_adptrs), arg); + + return ret; +} + +static int evt_parse_pool_sz(struct evt_options *opt, const char *arg) { opt->pool_sz = atoi(arg); @@ -169,7 +240,17 @@ usage(char *program) "\t--worker_deq_depth : dequeue depth of the worker\n" "\t--fwd_latency : perform fwd_latency measurement\n" "\t--queue_priority : enable queue priority\n" - "\t--prod_type_ethdev : use ethernet device as producer\n." + "\t--prod_type_ethdev : use ethernet device as producer.\n" + "\t--prod_type_timerdev : use event timer device as producer.\n" + "\t expity_nsec would be the timeout\n" + "\t in ns.\n" + "\t--prod_type_timerdev_burst : use timer device as producer\n" + "\t burst mode.\n" + "\t--nb_timers : number of timers to arm.\n" + "\t--nb_timer_adptrs : number of timer adapters to use.\n" + "\t--timer_tick_nsec : timer tick interval in ns.\n" + "\t--max_tmo_nsec : max timeout interval in ns.\n" + "\t--expiry_nsec : event timer expiry ns.\n" ); printf("available tests:\n"); evt_test_dump_names(); @@ -217,22 +298,29 @@ evt_parse_sched_type_list(struct evt_options *opt, const char *arg) } static struct option lgopts[] = { - { EVT_NB_FLOWS, 1, 0, 0 }, - { EVT_DEVICE, 1, 0, 0 }, - { EVT_VERBOSE, 1, 0, 0 }, - { EVT_TEST, 1, 0, 0 }, - { EVT_PROD_LCORES, 1, 0, 0 }, - { EVT_WORK_LCORES, 1, 0, 0 }, - { EVT_SOCKET_ID, 1, 0, 0 }, - { EVT_POOL_SZ, 1, 0, 0 }, - { EVT_NB_PKTS, 1, 0, 0 }, - { EVT_WKR_DEQ_DEP, 1, 0, 0 }, - { EVT_SCHED_TYPE_LIST, 1, 0, 0 }, - { EVT_FWD_LATENCY, 0, 0, 0 }, - { EVT_QUEUE_PRIORITY, 0, 0, 0 }, - { EVT_PROD_ETHDEV, 0, 0, 0 }, - { EVT_HELP, 0, 0, 0 }, - { NULL, 0, 0, 0 } + { EVT_NB_FLOWS, 1, 0, 0 }, + { EVT_DEVICE, 1, 0, 0 }, + { EVT_VERBOSE, 1, 0, 0 }, + { EVT_TEST, 1, 0, 0 }, + { EVT_PROD_LCORES, 1, 0, 0 }, + { EVT_WORK_LCORES, 1, 0, 0 }, + { EVT_SOCKET_ID, 1, 0, 0 }, + { EVT_POOL_SZ, 1, 0, 0 }, + { EVT_NB_PKTS, 1, 0, 0 }, + { EVT_WKR_DEQ_DEP, 1, 0, 0 }, + { EVT_SCHED_TYPE_LIST, 1, 0, 0 }, + { EVT_FWD_LATENCY, 0, 0, 0 }, + { EVT_QUEUE_PRIORITY, 0, 0, 0 }, + { EVT_PROD_ETHDEV, 0, 0, 0 }, + { EVT_PROD_TIMERDEV, 0, 0, 0 }, + { EVT_PROD_TIMERDEV_BURST, 0, 0, 0 }, + { EVT_NB_TIMERS, 1, 0, 0 }, + { EVT_NB_TIMER_ADPTRS, 1, 0, 0 }, + { EVT_TIMER_TICK_NSEC, 1, 0, 0 }, + { EVT_MAX_TMO_NSEC, 1, 0, 0 }, + { EVT_EXPIRY_NSEC, 1, 0, 0 }, + { EVT_HELP, 0, 0, 0 }, + { NULL, 0, 0, 0 } }; static int @@ -255,11 +343,18 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt) { EVT_FWD_LATENCY, evt_parse_fwd_latency}, { EVT_QUEUE_PRIORITY, evt_parse_queue_priority}, { EVT_PROD_ETHDEV, evt_parse_eth_prod_type}, + { EVT_PROD_TIMERDEV, evt_parse_timer_prod_type}, + { EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst}, + { EVT_NB_TIMERS, evt_parse_nb_timers}, + { EVT_NB_TIMER_ADPTRS, evt_parse_nb_timer_adptrs}, + { EVT_TIMER_TICK_NSEC, evt_parse_timer_tick_nsec}, + { EVT_MAX_TMO_NSEC, evt_parse_max_tmo_nsec}, + { EVT_EXPIRY_NSEC, evt_parse_expiry_nsec}, }; for (i = 0; i < RTE_DIM(parsermap); i++) { if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name, - strlen(parsermap[i].lgopt_name)) == 0) + strlen(lgopts[opt_idx].name)) == 0) return parsermap[i].parser_fn(opt, optarg); } @@ -305,6 +400,7 @@ evt_options_dump(struct evt_options *opt) evt_dump("pool_sz", "%d", opt->pool_sz); evt_dump("master lcore", "%d", rte_get_master_lcore()); evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts); + evt_dump("nb_timers", 
"%"PRIu64, opt->nb_timers); evt_dump_begin("available lcores"); RTE_LCORE_FOREACH(lcore_id) printf("%d ", lcore_id); diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h index 46d12222..f3de48a1 100644 --- a/app/test-eventdev/evt_options.h +++ b/app/test-eventdev/evt_options.h @@ -9,6 +9,7 @@ #include <stdbool.h> #include <rte_common.h> +#include <rte_ethdev.h> #include <rte_eventdev.h> #include <rte_lcore.h> @@ -31,12 +32,20 @@ #define EVT_FWD_LATENCY ("fwd_latency") #define EVT_QUEUE_PRIORITY ("queue_priority") #define EVT_PROD_ETHDEV ("prod_type_ethdev") +#define EVT_PROD_TIMERDEV ("prod_type_timerdev") +#define EVT_PROD_TIMERDEV_BURST ("prod_type_timerdev_burst") +#define EVT_NB_TIMERS ("nb_timers") +#define EVT_NB_TIMER_ADPTRS ("nb_timer_adptrs") +#define EVT_TIMER_TICK_NSEC ("timer_tick_nsec") +#define EVT_MAX_TMO_NSEC ("max_tmo_nsec") +#define EVT_EXPIRY_NSEC ("expiry_nsec") #define EVT_HELP ("help") enum evt_prod_type { EVT_PROD_TYPE_NONE, EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */ EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */ + EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */ EVT_PROD_TYPE_MAX, }; @@ -52,11 +61,19 @@ struct evt_options { int nb_stages; int verbose_level; uint64_t nb_pkts; + uint8_t nb_timer_adptrs; + uint64_t nb_timers; + uint64_t timer_tick_nsec; + uint64_t optm_timer_tick_nsec; + uint64_t max_tmo_nsec; + uint64_t expiry_nsec; uint16_t wkr_deq_dep; uint8_t dev_id; uint32_t fwd_latency:1; uint32_t q_priority:1; enum evt_prod_type prod_type; + uint8_t timdev_use_burst; + uint8_t timdev_cnt; }; void evt_options_default(struct evt_options *opt); @@ -262,6 +279,24 @@ evt_dump_producer_type(struct evt_options *opt) case EVT_PROD_TYPE_ETH_RX_ADPTR: snprintf(name, EVT_PROD_MAX_NAME_LEN, "Ethdev Rx Adapter producers"); + evt_dump("nb_ethdev", "%d", rte_eth_dev_count_avail()); + break; + case EVT_PROD_TYPE_EVENT_TIMER_ADPTR: + if (opt->timdev_use_burst) + snprintf(name, EVT_PROD_MAX_NAME_LEN, + "Event timer adapter burst mode producer"); + else + snprintf(name, EVT_PROD_MAX_NAME_LEN, + "Event timer adapter producer"); + evt_dump("nb_timer_adapters", "%d", opt->nb_timer_adptrs); + evt_dump("max_tmo_nsec", "%"PRIu64"", opt->max_tmo_nsec); + evt_dump("expiry_nsec", "%"PRIu64"", opt->expiry_nsec); + if (opt->optm_timer_tick_nsec) + evt_dump("optm_timer_tick_nsec", "%"PRIu64"", + opt->optm_timer_tick_nsec); + else + evt_dump("timer_tick_nsec", "%"PRIu64"", + opt->timer_tick_nsec); break; } evt_dump("prod_type", "%s", name); diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h index 7477a325..f07d2c33 100644 --- a/app/test-eventdev/evt_test.h +++ b/app/test-eventdev/evt_test.h @@ -77,8 +77,7 @@ void evt_test_dump_names(void); #define EVT_TEST_REGISTER(nm) \ static struct evt_test_entry _evt_test_entry_ ##nm; \ -RTE_INIT(evt_test_ ##nm); \ -static void evt_test_ ##nm(void) \ +RTE_INIT(evt_test_ ##nm) \ { \ _evt_test_entry_ ##nm.test.name = RTE_STR(nm);\ memcpy(&_evt_test_entry_ ##nm.test.ops, &nm, \ diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build index 7c373c87..a81dcd13 100644 --- a/app/test-eventdev/meson.build +++ b/app/test-eventdev/meson.build @@ -1,6 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Cavium, Inc +allow_experimental_apis = true sources = files('evt_main.c', 'evt_options.c', 'evt_test.c', @@ -11,19 +12,4 @@ sources = files('evt_main.c', 'test_perf_common.c', 'test_perf_atq.c', 'test_perf_queue.c') - -dep_objs = 
[get_variable(get_option('default_library') + '_rte_eventdev')] -dep_objs += cc.find_library('execinfo', required: false) # BSD only - -link_libs = [] -if get_option('default_library') == 'static' - link_libs = dpdk_drivers -endif - -executable('dpdk-test-eventdev', - sources, - c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'], - link_whole: link_libs, - dependencies: dep_objs, - install_rpath: join_paths(get_option('prefix'), driver_install_path), - install: true) +deps += 'eventdev' diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c index c57fbbfa..35debcfd 100644 --- a/app/test-eventdev/test_order_atq.c +++ b/app/test-eventdev/test_order_atq.c @@ -151,10 +151,14 @@ order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) if (ret) return ret; - ret = evt_service_setup(opt->dev_id); - if (ret) { - evt_err("No service lcore found to run event dev."); - return ret; + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } } ret = rte_event_dev_start(opt->dev_id); diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c index f603a023..17f7b984 100644 --- a/app/test-eventdev/test_order_queue.c +++ b/app/test-eventdev/test_order_queue.c @@ -164,10 +164,14 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) if (ret) return ret; - ret = evt_service_setup(opt->dev_id); - if (ret) { - evt_err("No service lcore found to run event dev."); - return ret; + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } } ret = rte_event_dev_start(opt->dev_id); diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c index b36b22a7..9715a2ce 100644 --- a/app/test-eventdev/test_perf_atq.c +++ b/app/test-eventdev/test_perf_atq.c @@ -11,7 +11,7 @@ atq_nb_event_queues(struct evt_options *opt) { /* nb_queues = number of producers */ return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? - rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores); + rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores); } static inline __attribute__((always_inline)) void @@ -43,15 +43,12 @@ perf_atq_worker(void *arg, const int enable_fwd_latency) while (t->done == false) { uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); - if (enable_fwd_latency) - rte_prefetch0(ev.event_ptr); - if (!event) { rte_pause(); continue; } - if (enable_fwd_latency) + if (enable_fwd_latency && !prod_timer_type) /* first stage in pipeline, mark ts to compute fwd latency */ atq_mark_fwd_latency(&ev); @@ -90,7 +87,7 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency) } for (i = 0; i < nb_rx; i++) { - if (enable_fwd_latency) { + if (enable_fwd_latency && !prod_timer_type) { rte_prefetch0(ev[i+1].event_ptr); /* first stage in pipeline. * mark time stamp to compute fwd latency @@ -163,7 +160,8 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) struct rte_event_dev_info dev_info; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 
0 : + nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 : evt_nr_active_lcores(opt->plcores); nb_queues = atq_nb_event_queues(opt); diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c index 59fa0a49..d33cb2cd 100644 --- a/app/test-eventdev/test_perf_common.c +++ b/app/test-eventdev/test_perf_common.c @@ -72,6 +72,128 @@ perf_producer(void *arg) return 0; } +static inline int +perf_event_timer_producer(void *arg) +{ + struct prod_data *p = arg; + struct test_perf *t = p->t; + struct evt_options *opt = t->opt; + uint32_t flow_counter = 0; + uint64_t count = 0; + uint64_t arm_latency = 0; + const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs; + const uint32_t nb_flows = t->nb_flows; + const uint64_t nb_timers = opt->nb_timers; + struct rte_mempool *pool = t->pool; + struct perf_elt *m; + struct rte_event_timer_adapter **adptr = t->timer_adptr; + struct rte_event_timer tim; + uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec; + + memset(&tim, 0, sizeof(struct rte_event_timer)); + timeout_ticks = opt->optm_timer_tick_nsec ? + (timeout_ticks * opt->timer_tick_nsec) + / opt->optm_timer_tick_nsec : timeout_ticks; + timeout_ticks += timeout_ticks ? 0 : 1; + tim.ev.event_type = RTE_EVENT_TYPE_TIMER; + tim.ev.op = RTE_EVENT_OP_NEW; + tim.ev.sched_type = t->opt->sched_type_list[0]; + tim.ev.queue_id = p->queue_id; + tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; + tim.state = RTE_EVENT_TIMER_NOT_ARMED; + tim.timeout_ticks = timeout_ticks; + + if (opt->verbose_level > 1) + printf("%s(): lcore %d\n", __func__, rte_lcore_id()); + + while (count < nb_timers && t->done == false) { + if (rte_mempool_get(pool, (void **)&m) < 0) + continue; + + m->tim = tim; + m->tim.ev.flow_id = flow_counter++ % nb_flows; + m->tim.ev.event_ptr = m; + m->timestamp = rte_get_timer_cycles(); + while (rte_event_timer_arm_burst( + adptr[flow_counter % nb_timer_adptrs], + (struct rte_event_timer **)&m, 1) != 1) { + if (t->done) + break; + rte_pause(); + m->timestamp = rte_get_timer_cycles(); + } + arm_latency += rte_get_timer_cycles() - m->timestamp; + count++; + } + fflush(stdout); + rte_delay_ms(1000); + printf("%s(): lcore %d Average event timer arm latency = %.3f us\n", + __func__, rte_lcore_id(), (float)(arm_latency / count) / + (rte_get_timer_hz() / 1000000)); + return 0; +} + +static inline int +perf_event_timer_producer_burst(void *arg) +{ + int i; + struct prod_data *p = arg; + struct test_perf *t = p->t; + struct evt_options *opt = t->opt; + uint32_t flow_counter = 0; + uint64_t count = 0; + uint64_t arm_latency = 0; + const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs; + const uint32_t nb_flows = t->nb_flows; + const uint64_t nb_timers = opt->nb_timers; + struct rte_mempool *pool = t->pool; + struct perf_elt *m[BURST_SIZE + 1] = {NULL}; + struct rte_event_timer_adapter **adptr = t->timer_adptr; + struct rte_event_timer tim; + uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec; + + memset(&tim, 0, sizeof(struct rte_event_timer)); + timeout_ticks = opt->optm_timer_tick_nsec ? + (timeout_ticks * opt->timer_tick_nsec) + / opt->optm_timer_tick_nsec : timeout_ticks; + timeout_ticks += timeout_ticks ? 
0 : 1; + tim.ev.event_type = RTE_EVENT_TYPE_TIMER; + tim.ev.op = RTE_EVENT_OP_NEW; + tim.ev.sched_type = t->opt->sched_type_list[0]; + tim.ev.queue_id = p->queue_id; + tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; + tim.state = RTE_EVENT_TIMER_NOT_ARMED; + tim.timeout_ticks = timeout_ticks; + + if (opt->verbose_level > 1) + printf("%s(): lcore %d\n", __func__, rte_lcore_id()); + + while (count < nb_timers && t->done == false) { + if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0) + continue; + for (i = 0; i < BURST_SIZE; i++) { + rte_prefetch0(m[i + 1]); + m[i]->tim = tim; + m[i]->tim.ev.flow_id = flow_counter++ % nb_flows; + m[i]->tim.ev.event_ptr = m[i]; + m[i]->timestamp = rte_get_timer_cycles(); + } + rte_event_timer_arm_tmo_tick_burst( + adptr[flow_counter % nb_timer_adptrs], + (struct rte_event_timer **)m, + tim.timeout_ticks, + BURST_SIZE); + arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp; + count += BURST_SIZE; + } + fflush(stdout); + rte_delay_ms(1000); + printf("%s(): lcore %d Average event timer arm latency = %.3f us\n", + __func__, rte_lcore_id(), (float)(arm_latency / count) / + (rte_get_timer_hz() / 1000000)); + return 0; +} + static int perf_producer_wrapper(void *arg) { @@ -80,6 +202,12 @@ perf_producer_wrapper(void *arg) /* Launch the producer function only in case of synthetic producer. */ if (t->opt->prod_type == EVT_PROD_TYPE_SYNT) return perf_producer(arg); + else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR && + !t->opt->timdev_use_burst) + return perf_event_timer_producer(arg); + else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR && + t->opt->timdev_use_burst) + return perf_event_timer_producer_burst(arg); return 0; } @@ -146,8 +274,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt, port_idx++; } - const uint64_t total_pkts = opt->nb_pkts * - evt_nr_active_lcores(opt->plcores); + const uint64_t total_pkts = t->outstand_pkts; uint64_t dead_lock_cycles = rte_get_timer_cycles(); int64_t dead_lock_remaining = total_pkts; @@ -189,7 +316,9 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt, if (remaining <= 0) { t->result = EVT_TEST_SUCCESS; - if (opt->prod_type == EVT_PROD_TYPE_SYNT) { + if (opt->prod_type == EVT_PROD_TYPE_SYNT || + opt->prod_type == + EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { t->done = true; rte_smp_wmb(); break; @@ -226,7 +355,7 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, memset(&queue_conf, 0, sizeof(struct rte_event_eth_rx_adapter_queue_conf)); queue_conf.ev.sched_type = opt->sched_type_list[0]; - for (prod = 0; prod < rte_eth_dev_count(); prod++) { + RTE_ETH_FOREACH_DEV(prod) { uint32_t cap; ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, @@ -283,6 +412,65 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, return ret; } +static int +perf_event_timer_adapter_setup(struct test_perf *t) +{ + int i; + int ret; + struct rte_event_timer_adapter_info adapter_info; + struct rte_event_timer_adapter *wl; + uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores); + uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; + + if (nb_producers == 1) + flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT; + + for (i = 0; i < t->opt->nb_timer_adptrs; i++) { + struct rte_event_timer_adapter_conf config = { + .event_dev_id = t->opt->dev_id, + .timer_adapter_id = i, + .timer_tick_ns = t->opt->timer_tick_nsec, + .max_tmo_ns = t->opt->max_tmo_nsec, + .nb_timers = 2 * 1024 * 1024, + .flags = flags, + }; + + wl = 
rte_event_timer_adapter_create(&config); + if (wl == NULL) { + evt_err("failed to create event timer ring %d", i); + return rte_errno; + } + + memset(&adapter_info, 0, + sizeof(struct rte_event_timer_adapter_info)); + rte_event_timer_adapter_get_info(wl, &adapter_info); + t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns; + + if (!(adapter_info.caps & + RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) { + uint32_t service_id; + + rte_event_timer_adapter_service_id_get(wl, + &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("Failed to setup service core" + " for timer adapter\n"); + return ret; + } + rte_service_runstate_set(service_id, 1); + } + + ret = rte_event_timer_adapter_start(wl); + if (ret) { + evt_err("failed to Start event timer adapter %d", i); + return ret; + } + t->timer_adptr[i] = wl; + } + return 0; +} + int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, uint8_t stride, uint8_t nb_queues, @@ -326,6 +514,18 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, ret = perf_event_rx_adapter_setup(opt, stride, *port_conf); if (ret) return ret; + } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { + prod = 0; + for ( ; port < perf_nb_event_ports(opt); port++) { + struct prod_data *p = &t->prod[port]; + p->queue_id = prod * stride; + p->t = t; + prod++; + } + + ret = perf_event_timer_adapter_setup(t); + if (ret) + return ret; } else { prod = 0; for ( ; port < perf_nb_event_ports(opt); port++) { @@ -415,10 +615,13 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues) } /* Fixups */ - if (opt->nb_stages == 1 && opt->fwd_latency) { + if ((opt->nb_stages == 1 && + opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) && + opt->fwd_latency) { evt_info("fwd_latency is valid when nb_stages > 1, disabling"); opt->fwd_latency = 0; } + if (opt->fwd_latency && !opt->q_priority) { evt_info("enabled queue priority for latency measurement"); opt->q_priority = 1; @@ -447,8 +650,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues) void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt) { - RTE_SET_USED(test); + int i; + struct test_perf *t = evt_test_priv(test); + if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { + for (i = 0; i < opt->nb_timer_adptrs; i++) + rte_event_timer_adapter_stop(t->timer_adptr[i]); + } rte_event_dev_stop(opt->dev_id); rte_event_dev_close(opt->dev_id); } @@ -465,20 +673,14 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused, int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; struct test_perf *t = evt_test_priv(test); struct rte_eth_conf port_conf = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, - .hw_ip_checksum = 0, - .hw_vlan_filter = 0, - .hw_vlan_strip = 0, - .hw_vlan_extend = 0, - .jumbo_frame = 0, - .hw_strip_crc = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .rx_adv_conf = { .rss_conf = { @@ -488,19 +690,33 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) }, }; - if (opt->prod_type == EVT_PROD_TYPE_SYNT) + if (opt->prod_type == EVT_PROD_TYPE_SYNT || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) return 0; - if (!rte_eth_dev_count()) { + if (!rte_eth_dev_count_avail()) { evt_err("No ethernet ports found."); return -ENODEV; } - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { + struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_port_conf = port_conf; + 
+ rte_eth_dev_info_get(i, &dev_info); + + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + evt_info("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + i, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } - if (rte_eth_dev_configure(i, 1, 1, - &port_conf) - < 0) { + if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) { evt_err("Failed to configure eth port [%d]", i); return -EINVAL; } @@ -527,14 +743,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; RTE_SET_USED(test); if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { rte_event_eth_rx_adapter_stop(i); rte_eth_dev_stop(i); - rte_eth_dev_close(i); } } } @@ -544,7 +759,8 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt) { struct test_perf *t = evt_test_priv(test); - if (opt->prod_type == EVT_PROD_TYPE_SYNT) { + if (opt->prod_type == EVT_PROD_TYPE_SYNT || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { t->pool = rte_mempool_create(test->name, /* mempool name */ opt->pool_sz, /* number of elements*/ sizeof(struct perf_elt), /* element size*/ @@ -594,10 +810,18 @@ perf_test_setup(struct evt_test *test, struct evt_options *opt) struct test_perf *t = evt_test_priv(test); - t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores); + if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { + t->outstand_pkts = opt->nb_timers * + evt_nr_active_lcores(opt->plcores); + t->nb_pkts = opt->nb_timers; + } else { + t->outstand_pkts = opt->nb_pkts * + evt_nr_active_lcores(opt->plcores); + t->nb_pkts = opt->nb_pkts; + } + t->nb_workers = evt_nr_active_lcores(opt->wlcores); t->done = false; - t->nb_pkts = opt->nb_pkts; t->nb_flows = opt->nb_flows; t->result = EVT_TEST_FAILED; t->opt = opt; diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h index 9ad99733..d8fbee6d 100644 --- a/app/test-eventdev/test_perf_common.h +++ b/app/test-eventdev/test_perf_common.h @@ -13,6 +13,7 @@ #include <rte_ethdev.h> #include <rte_eventdev.h> #include <rte_event_eth_rx_adapter.h> +#include <rte_event_timer_adapter.h> #include <rte_lcore.h> #include <rte_malloc.h> #include <rte_mempool.h> @@ -39,6 +40,7 @@ struct prod_data { struct test_perf *t; } __rte_cache_aligned; + struct test_perf { /* Don't change the offset of "done". Signal handler use this memory * to terminate all lcores work. 
@@ -54,10 +56,18 @@ struct test_perf { struct worker_data worker[EVT_MAX_PORTS]; struct evt_options *opt; uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned; + struct rte_event_timer_adapter *timer_adptr[ + RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned; } __rte_cache_aligned; struct perf_elt { - uint64_t timestamp; + union { + struct rte_event_timer tim; + struct { + char pad[offsetof(struct rte_event_timer, user_meta)]; + uint64_t timestamp; + }; + }; } __rte_cache_aligned; #define BURST_SIZE 16 @@ -68,6 +78,8 @@ struct perf_elt { struct evt_options *opt = t->opt;\ const uint8_t dev = w->dev_id;\ const uint8_t port = w->port_id;\ + const uint8_t prod_timer_type = \ + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\ uint8_t *const sched_type_list = &t->sched_type_list[0];\ struct rte_mempool *const pool = t->pool;\ const uint8_t nb_stages = t->opt->nb_stages;\ diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c index db8f2f3e..04ce9419 100644 --- a/app/test-eventdev/test_perf_queue.c +++ b/app/test-eventdev/test_perf_queue.c @@ -11,7 +11,7 @@ perf_queue_nb_event_queues(struct evt_options *opt) { /* nb_queues = number of producers * number of stages */ uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? - rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores); + rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores); return nb_prod * opt->nb_stages; } @@ -49,7 +49,7 @@ perf_queue_worker(void *arg, const int enable_fwd_latency) rte_pause(); continue; } - if (enable_fwd_latency) + if (enable_fwd_latency && !prod_timer_type) /* first q in pipeline, mark timestamp to compute fwd latency */ mark_fwd_latency(&ev, nb_stages); @@ -88,7 +88,7 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency) } for (i = 0; i < nb_rx; i++) { - if (enable_fwd_latency) { + if (enable_fwd_latency && !prod_timer_type) { rte_prefetch0(ev[i+1].event_ptr); /* first queue in pipeline. * mark time stamp to compute fwd latency @@ -161,7 +161,8 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) struct rte_event_dev_info dev_info; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 : + nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 
0 : evt_nr_active_lcores(opt->plcores); nb_queues = perf_queue_nb_event_queues(opt); diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c index dd718977..26dc79f9 100644 --- a/app/test-eventdev/test_pipeline_atq.c +++ b/app/test-eventdev/test_pipeline_atq.c @@ -12,7 +12,7 @@ pipeline_atq_nb_event_queues(struct evt_options *opt) { RTE_SET_USED(opt); - return rte_eth_dev_count(); + return rte_eth_dev_count_avail(); } static int @@ -324,7 +324,7 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) uint8_t nb_worker_queues = 0; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_queues = rte_eth_dev_count(); + nb_queues = rte_eth_dev_count_avail(); /* One extra port and queueu for Tx service */ if (t->mt_unsafe) { diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c index 6cad9357..a54068df 100644 --- a/app/test-eventdev/test_pipeline_common.c +++ b/app/test-eventdev/test_pipeline_common.c @@ -166,7 +166,7 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) */ lcores = 2; - if (!rte_eth_dev_count()) { + if (!rte_eth_dev_count_avail()) { evt_err("test needs minimum 1 ethernet dev"); return -1; } @@ -213,7 +213,7 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; uint8_t nb_queues = 1; uint8_t mt_state = 0; struct test_pipeline *t = evt_test_priv(test); @@ -223,7 +223,6 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) .mq_mode = ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .offloads = DEV_RX_OFFLOAD_CRC_STRIP, - .ignore_offload_bitfield = 1, }, .rx_adv_conf = { .rss_conf = { @@ -234,23 +233,34 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) }; RTE_SET_USED(opt); - if (!rte_eth_dev_count()) { + if (!rte_eth_dev_count_avail()) { evt_err("No ethernet ports found.\n"); return -ENODEV; } - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_port_conf = port_conf; - memset(&dev_info, 0, sizeof(struct rte_eth_dev_info)); rte_eth_dev_info_get(i, &dev_info); mt_state = !(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE); rx_conf = dev_info.default_rxconf; rx_conf.offloads = port_conf.rxmode.offloads; + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + evt_info("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + i, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } + if (rte_eth_dev_configure(i, nb_queues, nb_queues, - &port_conf) + &local_port_conf) < 0) { evt_err("Failed to configure eth port [%d]\n", i); return -EINVAL; @@ -337,7 +347,7 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, memset(&queue_conf, 0, sizeof(struct rte_event_eth_rx_adapter_queue_conf)); queue_conf.ev.sched_type = opt->sched_type_list[0]; - for (prod = 0; prod < rte_eth_dev_count(); prod++) { + RTE_ETH_FOREACH_DEV(prod) { uint32_t cap; ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, @@ -419,7 +429,7 @@ pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt, tx->dev_id = opt->dev_id; tx->queue_id = tx_queue_id; tx->port_id = tx_port_id; - tx->nb_ethports = rte_eth_dev_count(); 
+ tx->nb_ethports = rte_eth_dev_count_avail(); tx->t = t; /* Register Tx service */ @@ -453,7 +463,7 @@ pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt, void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; RTE_SET_USED(test); RTE_SET_USED(opt); struct test_pipeline *t = evt_test_priv(test); @@ -464,10 +474,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt) rte_service_component_unregister(t->tx_service.service_id); } - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { rte_event_eth_rx_adapter_stop(i); rte_eth_dev_stop(i); - rte_eth_dev_close(i); } } diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c index 02fc27cf..ca5f4578 100644 --- a/app/test-eventdev/test_pipeline_queue.c +++ b/app/test-eventdev/test_pipeline_queue.c @@ -10,7 +10,7 @@ static __rte_always_inline int pipeline_queue_nb_event_queues(struct evt_options *opt) { - uint16_t eth_count = rte_eth_dev_count(); + uint16_t eth_count = rte_eth_dev_count_avail(); return (eth_count * opt->nb_stages) + eth_count; } @@ -333,7 +333,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) uint8_t nb_worker_queues = 0; nb_ports = evt_nr_active_lcores(opt->wlcores); - nb_queues = rte_eth_dev_count() * (nb_stages); + nb_queues = rte_eth_dev_count_avail() * (nb_stages); /* Extra port for Tx service. */ if (t->mt_unsafe) { @@ -341,7 +341,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) nb_ports++; nb_queues++; } else - nb_queues += rte_eth_dev_count(); + nb_queues += rte_eth_dev_count_avail(); rte_event_dev_info_get(opt->dev_id, &info); |
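
Two details of the perf-test changes above are easy to miss. First, timer timeouts are programmed in ticks of timer_tick_nsec: with the new defaults (timer_tick_nsec = 1000 ns, expiry_nsec = 10000 ns) each timer is armed with 10 ticks, and when the adapter reports a different achievable resolution through min_resolution_ns (say 2000 ns, stored as optm_timer_tick_nsec) the producer rescales that to 10 * 1000 / 2000 = 5 ticks. Second, struct perf_elt now overlays its latency timestamp on the rte_event_timer's user_meta area, so one mempool element can be handed to the timer adapter and still carry the arm timestamp the workers read back for latency accounting. Below is a minimal compile-time illustration of that overlay; the sanity check is mine and not part of the patch.

#include <stddef.h>
#include <stdint.h>
#include <rte_event_timer_adapter.h>

/* Same overlay as struct perf_elt in test_perf_common.h: 'timestamp' is
 * padded out to the offset of the timer's user_meta[] area, i.e. just past
 * the fields the timer adapter itself reads and writes. */
struct perf_elt_sketch {
	union {
		struct rte_event_timer tim;
		struct {
			char pad[offsetof(struct rte_event_timer, user_meta)];
			uint64_t timestamp;
		};
	};
};

/* Assumption being checked (not in the patch): the timestamp starts exactly
 * at user_meta, so it never aliases adapter-owned fields. */
_Static_assert(offsetof(struct perf_elt_sketch, timestamp) ==
	       offsetof(struct rte_event_timer, user_meta),
	       "timestamp must start at user_meta");
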