diff options
author | Luca Boccassi <luca.boccassi@gmail.com> | 2018-02-19 11:16:57 +0000 |
---|---|---|
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2018-02-19 11:17:28 +0000 |
commit | ca33590b6af032bff57d9cc70455660466a654b2 (patch) | |
tree | 0b68b090bd9b4a78a3614b62400b29279d76d553 /app | |
parent | 169a9de21e263aa6599cdc2d87a45ae158d9f509 (diff) |
New upstream version 18.02upstream/18.02
Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'app')
81 files changed, 9686 insertions, 2591 deletions
diff --git a/app/Makefile b/app/Makefile index 7ea02b01..0eaed538 100644 --- a/app/Makefile +++ b/app/Makefile @@ -1,40 +1,16 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation include $(RTE_SDK)/mk/rte.vars.mk DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd -DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += proc_info +DIRS-$(CONFIG_RTE_PROC_INFO) += proc_info DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump +ifeq ($(CONFIG_RTE_LIBRTE_BBDEV),y) +DIRS-$(CONFIG_RTE_TEST_BBDEV) += test-bbdev +endif + ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y) DIRS-$(CONFIG_RTE_APP_CRYPTO_PERF) += test-crypto-perf endif diff --git a/app/meson.build b/app/meson.build new file mode 100644 index 00000000..0088de46 --- /dev/null +++ b/app/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +subdir('test-pmd') +subdir('test-eventdev') diff --git a/app/pdump/Makefile b/app/pdump/Makefile index 38ac3e9a..bd3c2081 100644 --- a/app/pdump/Makefile +++ b/app/pdump/Makefile @@ -1,33 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2016 Intel Corporation. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016 Intel Corporation include $(RTE_SDK)/mk/rte.vars.mk @@ -35,6 +7,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_PDUMP),y) APP = dpdk-pdump +CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += $(WERROR_FLAGS) # all source are stored in SRCS-y diff --git a/app/pdump/main.c b/app/pdump/main.c index 66272f59..f6865bdb 100644 --- a/app/pdump/main.c +++ b/app/pdump/main.c @@ -1,34 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation */ #include <stdio.h> @@ -911,5 +882,9 @@ main(int argc, char **argv) /* dump debug stats */ print_pdump_stats(); + ret = rte_eal_cleanup(); + if (ret) + printf("Error from rte_eal_cleanup(), %d\n", ret); + return 0; } diff --git a/app/proc_info/Makefile b/app/proc_info/Makefile index 9e90438e..9e87f524 100644 --- a/app/proc_info/Makefile +++ b/app/proc_info/Makefile @@ -1,38 +1,11 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2015 Intel Corporation. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. 
-# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2015 Intel Corporation include $(RTE_SDK)/mk/rte.vars.mk APP = dpdk-procinfo +CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += $(WERROR_FLAGS) # all source are stored in SRCS-y diff --git a/app/proc_info/main.c b/app/proc_info/main.c index 64fbbd0f..2f53e3ca 100644 --- a/app/proc_info/main.c +++ b/app/proc_info/main.c @@ -1,34 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include <stdio.h> @@ -689,5 +660,9 @@ main(int argc, char **argv) if (enable_metrics) metrics_display(RTE_METRICS_GLOBAL); + ret = rte_eal_cleanup(); + if (ret) + printf("Error from rte_eal_cleanup(), %d\n", ret); + return 0; } diff --git a/app/test-bbdev/Makefile b/app/test-bbdev/Makefile new file mode 100644 index 00000000..9aedd776 --- /dev/null +++ b/app/test-bbdev/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +APP = testbbdev + +CFLAGS += -DALLOW_EXPERIMENTAL_API +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# +# all sources are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_TEST_BBDEV) += main.c +SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev.c +SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev_perf.c +SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev_vector.c + +include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/test-bbdev/main.c b/app/test-bbdev/main.c new file mode 100644 index 00000000..41b54bb1 --- /dev/null +++ b/app/test-bbdev/main.c @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include <getopt.h> +#include <inttypes.h> +#include <stdio.h> +#include <string.h> +#include <stdbool.h> + +#include <rte_eal.h> +#include <rte_common.h> +#include <rte_string_fns.h> +#include <rte_cycles.h> +#include <rte_lcore.h> + +#include "main.h" + +/* Defines how many testcases can be specified as cmdline args */ +#define MAX_CMDLINE_TESTCASES 8 + +static const char tc_sep = ','; + +static struct test_params { + struct test_command *test_to_run[MAX_CMDLINE_TESTCASES]; + unsigned int num_tests; + unsigned int num_ops; + unsigned int burst_sz; + unsigned int num_lcores; + char test_vector_filename[PATH_MAX]; +} test_params; + +static struct test_commands_list commands_list = + TAILQ_HEAD_INITIALIZER(commands_list); + +void 
+add_test_command(struct test_command *t) +{ + TAILQ_INSERT_TAIL(&commands_list, t, next); +} + +int +unit_test_suite_runner(struct unit_test_suite *suite) +{ + int test_result = TEST_SUCCESS; + unsigned int total = 0, skipped = 0, succeeded = 0, failed = 0; + uint64_t start, end; + + printf( + "\n + ------------------------------------------------------- +\n"); + printf(" + Starting Test Suite : %s\n", suite->suite_name); + + start = rte_rdtsc_precise(); + + if (suite->setup) { + test_result = suite->setup(); + if (test_result == TEST_FAILED) { + printf(" + Test suite setup %s failed!\n", + suite->suite_name); + printf( + " + ------------------------------------------------------- +\n"); + return 1; + } + if (test_result == TEST_SKIPPED) { + printf(" + Test suite setup %s skipped!\n", + suite->suite_name); + printf( + " + ------------------------------------------------------- +\n"); + return 0; + } + } + + while (suite->unit_test_cases[total].testcase) { + if (suite->unit_test_cases[total].setup) + test_result = suite->unit_test_cases[total].setup(); + + if (test_result == TEST_SUCCESS) + test_result = suite->unit_test_cases[total].testcase(); + + if (suite->unit_test_cases[total].teardown) + suite->unit_test_cases[total].teardown(); + + if (test_result == TEST_SUCCESS) { + succeeded++; + printf(" + TestCase [%2d] : %s passed\n", total, + suite->unit_test_cases[total].name); + } else if (test_result == TEST_SKIPPED) { + skipped++; + printf(" + TestCase [%2d] : %s skipped\n", total, + suite->unit_test_cases[total].name); + } else { + failed++; + printf(" + TestCase [%2d] : %s failed\n", total, + suite->unit_test_cases[total].name); + } + + total++; + } + + /* Run test suite teardown */ + if (suite->teardown) + suite->teardown(); + + end = rte_rdtsc_precise(); + + printf(" + ------------------------------------------------------- +\n"); + printf(" + Test Suite Summary : %s\n", suite->suite_name); + printf(" + Tests Total : %2d\n", total); + printf(" + Tests Skipped 
: %2d\n", skipped); + printf(" + Tests Passed : %2d\n", succeeded); + printf(" + Tests Failed : %2d\n", failed); + printf(" + Tests Lasted : %lg ms\n", + ((end - start) * 1000) / (double)rte_get_tsc_hz()); + printf(" + ------------------------------------------------------- +\n"); + + return (failed > 0) ? 1 : 0; +} + +const char * +get_vector_filename(void) +{ + return test_params.test_vector_filename; +} + +unsigned int +get_num_ops(void) +{ + return test_params.num_ops; +} + +unsigned int +get_burst_sz(void) +{ + return test_params.burst_sz; +} + +unsigned int +get_num_lcores(void) +{ + return test_params.num_lcores; +} + +static void +print_usage(const char *prog_name) +{ + struct test_command *t; + + printf("Usage: %s [EAL params] [-- [-n/--num-ops NUM_OPS]\n" + "\t[-b/--burst-size BURST_SIZE]\n" + "\t[-v/--test-vector VECTOR_FILE]\n" + "\t[-c/--test-cases TEST_CASE[,TEST_CASE,...]]]\n", + prog_name); + + printf("Available testcases: "); + TAILQ_FOREACH(t, &commands_list, next) + printf("%s ", t->command); + printf("\n"); +} + +static int +parse_args(int argc, char **argv, struct test_params *tp) +{ + int opt, option_index; + unsigned int num_tests = 0; + bool test_cases_present = false; + bool test_vector_present = false; + struct test_command *t; + char *tokens[MAX_CMDLINE_TESTCASES]; + int tc, ret; + + static struct option lgopts[] = { + { "num-ops", 1, 0, 'n' }, + { "burst-size", 1, 0, 'b' }, + { "test-cases", 1, 0, 'c' }, + { "test-vector", 1, 0, 'v' }, + { "lcores", 1, 0, 'l' }, + { "help", 0, 0, 'h' }, + { NULL, 0, 0, 0 } + }; + + while ((opt = getopt_long(argc, argv, "hn:b:c:v:l:", lgopts, + &option_index)) != EOF) + switch (opt) { + case 'n': + TEST_ASSERT(strlen(optarg) > 0, + "Num of operations is not provided"); + tp->num_ops = strtol(optarg, NULL, 10); + break; + case 'b': + TEST_ASSERT(strlen(optarg) > 0, + "Burst size is not provided"); + tp->burst_sz = strtol(optarg, NULL, 10); + TEST_ASSERT(tp->burst_sz <= MAX_BURST, + "Burst size mustn't be 
greater than %u", + MAX_BURST); + break; + case 'c': + TEST_ASSERT(test_cases_present == false, + "Test cases provided more than once"); + test_cases_present = true; + + ret = rte_strsplit(optarg, strlen(optarg), + tokens, MAX_CMDLINE_TESTCASES, tc_sep); + + TEST_ASSERT(ret <= MAX_CMDLINE_TESTCASES, + "Too many test cases (max=%d)", + MAX_CMDLINE_TESTCASES); + + for (tc = 0; tc < ret; ++tc) { + /* Find matching test case */ + TAILQ_FOREACH(t, &commands_list, next) + if (!strcmp(tokens[tc], t->command)) + tp->test_to_run[num_tests] = t; + + TEST_ASSERT(tp->test_to_run[num_tests] != NULL, + "Unknown test case: %s", + tokens[tc]); + ++num_tests; + } + break; + case 'v': + TEST_ASSERT(test_vector_present == false, + "Test vector provided more than once"); + test_vector_present = true; + + TEST_ASSERT(strlen(optarg) > 0, + "Config file name is null"); + + snprintf(tp->test_vector_filename, + sizeof(tp->test_vector_filename), + "%s", optarg); + break; + case 'l': + TEST_ASSERT(strlen(optarg) > 0, + "Num of lcores is not provided"); + tp->num_lcores = strtol(optarg, NULL, 10); + TEST_ASSERT(tp->num_lcores <= RTE_MAX_LCORE, + "Num of lcores mustn't be greater than %u", + RTE_MAX_LCORE); + break; + case 'h': + print_usage(argv[0]); + return 0; + default: + printf("ERROR: Unknown option: -%c\n", opt); + return -1; + } + + if (tp->num_ops == 0) { + printf( + "WARNING: Num of operations was not provided or was set 0. Set to default (%u)\n", + DEFAULT_OPS); + tp->num_ops = DEFAULT_OPS; + } + if (tp->burst_sz == 0) { + printf( + "WARNING: Burst size was not provided or was set 0. Set to default (%u)\n", + DEFAULT_BURST); + tp->burst_sz = DEFAULT_BURST; + } + if (tp->num_lcores == 0) { + printf( + "WARNING: Num of lcores was not provided or was set 0. 
Set to value from RTE config (%u)\n", + rte_lcore_count()); + tp->num_lcores = rte_lcore_count(); + } + + TEST_ASSERT(tp->burst_sz <= tp->num_ops, + "Burst size (%u) mustn't be greater than num ops (%u)", + tp->burst_sz, tp->num_ops); + + tp->num_tests = num_tests; + return 0; +} + +static int +run_all_tests(void) +{ + int ret = TEST_SUCCESS; + struct test_command *t; + + TAILQ_FOREACH(t, &commands_list, next) + ret |= t->callback(); + + return ret; +} + +static int +run_parsed_tests(struct test_params *tp) +{ + int ret = TEST_SUCCESS; + unsigned int i; + + for (i = 0; i < tp->num_tests; ++i) + ret |= tp->test_to_run[i]->callback(); + + return ret; +} + +int +main(int argc, char **argv) +{ + int ret; + + /* Init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + return 1; + argc -= ret; + argv += ret; + + /* Parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv, &test_params); + if (ret < 0) { + print_usage(argv[0]); + return 1; + } + + rte_log_set_global_level(RTE_LOG_INFO); + + /* If no argument provided - run all tests */ + if (test_params.num_tests == 0) + return run_all_tests(); + else + return run_parsed_tests(&test_params); +} diff --git a/app/test-bbdev/main.h b/app/test-bbdev/main.h new file mode 100644 index 00000000..20a55efc --- /dev/null +++ b/app/test-bbdev/main.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#include <stddef.h> +#include <sys/queue.h> + +#include <rte_common.h> +#include <rte_hexdump.h> +#include <rte_log.h> + +#define TEST_SUCCESS 0 +#define TEST_FAILED -1 +#define TEST_SKIPPED 1 + +#define MAX_BURST 512U +#define DEFAULT_BURST 32U +#define DEFAULT_OPS 64U + +#define TEST_ASSERT(cond, msg, ...) 
do { \ + if (!(cond)) { \ + printf("TestCase %s() line %d failed: " \ + msg "\n", __func__, __LINE__, ##__VA_ARGS__); \ + return TEST_FAILED; \ + } \ +} while (0) + +/* Compare two buffers (length in bytes) */ +#define TEST_ASSERT_BUFFERS_ARE_EQUAL(a, b, len, msg, ...) do { \ + if (memcmp((a), (b), len)) { \ + printf("TestCase %s() line %d failed: " \ + msg "\n", __func__, __LINE__, ##__VA_ARGS__); \ + rte_memdump(stdout, "Buffer A", (a), len); \ + rte_memdump(stdout, "Buffer B", (b), len); \ + return TEST_FAILED; \ + } \ +} while (0) + +#define TEST_ASSERT_SUCCESS(val, msg, ...) do { \ + typeof(val) _val = (val); \ + if (!(_val == 0)) { \ + printf("TestCase %s() line %d failed (err %d): " \ + msg "\n", __func__, __LINE__, _val, \ + ##__VA_ARGS__); \ + return TEST_FAILED; \ + } \ +} while (0) + +#define TEST_ASSERT_FAIL(val, msg, ...) \ + TEST_ASSERT_SUCCESS(!(val), msg, ##__VA_ARGS__) + +#define TEST_ASSERT_NOT_NULL(val, msg, ...) do { \ + if ((val) == NULL) { \ + printf("TestCase %s() line %d failed (null): " \ + msg "\n", __func__, __LINE__, ##__VA_ARGS__); \ + return TEST_FAILED; \ + } \ +} while (0) + +struct unit_test_case { + int (*setup)(void); + void (*teardown)(void); + int (*testcase)(void); + const char *name; +}; + +#define TEST_CASE(testcase) {NULL, NULL, testcase, #testcase} + +#define TEST_CASE_ST(setup, teardown, testcase) \ + {setup, teardown, testcase, #testcase} + +#define TEST_CASES_END() {NULL, NULL, NULL, NULL} + +struct unit_test_suite { + const char *suite_name; + int (*setup)(void); + void (*teardown)(void); + struct unit_test_case unit_test_cases[]; +}; + +int unit_test_suite_runner(struct unit_test_suite *suite); + +typedef int (test_callback)(void); +TAILQ_HEAD(test_commands_list, test_command); +struct test_command { + TAILQ_ENTRY(test_command) next; + const char *command; + test_callback *callback; +}; + +void add_test_command(struct test_command *t); + +/* Register a test function */ +#define REGISTER_TEST_COMMAND(name, testsuite) \ 
+ static int test_func_##name(void) \ + { \ + return unit_test_suite_runner(&testsuite); \ + } \ + static struct test_command test_struct_##name = { \ + .command = RTE_STR(name), \ + .callback = test_func_##name, \ + }; \ + static void __attribute__((constructor, used)) \ + test_register_##name(void) \ + { \ + add_test_command(&test_struct_##name); \ + } + +const char *get_vector_filename(void); + +unsigned int get_num_ops(void); + +unsigned int get_burst_sz(void); + +unsigned int get_num_lcores(void); + +#endif diff --git a/app/test-bbdev/test-bbdev.py b/app/test-bbdev/test-bbdev.py new file mode 100755 index 00000000..ce781497 --- /dev/null +++ b/app/test-bbdev/test-bbdev.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python + +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +import sys +import os +import argparse +import subprocess +import shlex + +from threading import Timer + +def kill(process): + print "ERROR: Test app timed out" + process.kill() + +if "RTE_SDK" in os.environ: + dpdk_path = os.environ["RTE_SDK"] +else: + dpdk_path = "../.." + +if "RTE_TARGET" in os.environ: + dpdk_target = os.environ["RTE_TARGET"] +else: + dpdk_target = "x86_64-native-linuxapp-gcc" + +parser = argparse.ArgumentParser( + description='BBdev Unit Test Application', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) +parser.add_argument("-p", "--testapp-path", + help="specifies path to the bbdev test app", + default=dpdk_path + "/" + dpdk_target + "/app/testbbdev") +parser.add_argument("-e", "--eal-params", + help="EAL arguments which are passed to the test app", + default="--vdev=bbdev_null0") +parser.add_argument("-t", "--timeout", + type=int, + help="Timeout in seconds", + default=300) +parser.add_argument("-c", "--test-cases", + nargs="+", + help="Defines test cases to run. 
Run all if not specified") +parser.add_argument("-v", "--test-vector", + nargs="+", + help="Specifies paths to the test vector files.", + default=[dpdk_path + + "/app/test-bbdev/test_vectors/bbdev_vector_null.data"]) +parser.add_argument("-n", "--num-ops", + type=int, + help="Number of operations to process on device.", + default=32) +parser.add_argument("-b", "--burst-size", + nargs="+", + type=int, + help="Operations enqueue/dequeue burst size.", + default=[32]) +parser.add_argument("-l", "--num-lcores", + type=int, + help="Number of lcores to run.", + default=16) + +args = parser.parse_args() + +if not os.path.exists(args.testapp_path): + print "No such file: " + args.testapp_path + sys.exit(1) + +params = [args.testapp_path] +if args.eal_params: + params.extend(shlex.split(args.eal_params)) + +params.extend(["--"]) + +if args.num_ops: + params.extend(["-n", str(args.num_ops)]) + +if args.num_lcores: + params.extend(["-l", str(args.num_lcores)]) + +if args.test_cases: + params.extend(["-c"]) + params.extend([",".join(args.test_cases)]) + +exit_status = 0 +for vector in args.test_vector: + for burst_size in args.burst_size: + call_params = params[:] + call_params.extend(["-v", vector]) + call_params.extend(["-b", str(burst_size)]) + params_string = " ".join(call_params) + + print("Executing: {}".format(params_string)) + app_proc = subprocess.Popen(call_params) + if args.timeout > 0: + timer = Timer(args.timeout, kill, [app_proc]) + timer.start() + + try: + app_proc.communicate() + except: + print("Error: failed to execute: {}".format(params_string)) + finally: + timer.cancel() + + if app_proc.returncode != 0: + exit_status = 1 + print("ERROR TestCase failed. Failed test for vector {}. 
Return code: {}".format( + vector, app_proc.returncode)) + +sys.exit(exit_status) diff --git a/app/test-bbdev/test_bbdev.c b/app/test-bbdev/test_bbdev.c new file mode 100644 index 00000000..10579ea0 --- /dev/null +++ b/app/test-bbdev/test_bbdev.c @@ -0,0 +1,1378 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include <rte_common.h> +#include <rte_hexdump.h> +#include <rte_mbuf.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_cycles.h> + +#include <rte_bus_vdev.h> + +#include <rte_bbdev.h> +#include <rte_bbdev_op.h> +#include <rte_bbdev_pmd.h> + +#include "main.h" + + +#define BBDEV_NAME_NULL ("bbdev_null") + +struct bbdev_testsuite_params { + struct rte_bbdev_queue_conf qconf; +}; + +static struct bbdev_testsuite_params testsuite_params; + +static uint8_t null_dev_id; + +static int +testsuite_setup(void) +{ + uint8_t nb_devs; + int ret; + char buf[RTE_BBDEV_NAME_MAX_LEN]; + + /* Create test device */ + snprintf(buf, sizeof(buf), "%s_unittest", BBDEV_NAME_NULL); + ret = rte_vdev_init(buf, NULL); + TEST_ASSERT(ret == 0, "Failed to create instance of pmd: %s", buf); + + nb_devs = rte_bbdev_count(); + TEST_ASSERT(nb_devs != 0, "No devices found"); + + /* Most recently created device is our device */ + null_dev_id = nb_devs - 1; + + return TEST_SUCCESS; +} + +static void +testsuite_teardown(void) +{ + char buf[RTE_BBDEV_NAME_MAX_LEN]; + + snprintf(buf, sizeof(buf), "%s_unittest", BBDEV_NAME_NULL); + rte_vdev_uninit(buf); +} + +static int +ut_setup(void) +{ + struct bbdev_testsuite_params *ts_params = &testsuite_params; + uint8_t num_queues; + + /* Valid queue configuration */ + ts_params->qconf.priority = 0; + ts_params->qconf.socket = SOCKET_ID_ANY; + ts_params->qconf.deferred_start = 1; + + num_queues = 1; + TEST_ASSERT_SUCCESS(rte_bbdev_setup_queues(null_dev_id, num_queues, + SOCKET_ID_ANY), "Failed to setup queues for bbdev %u", + 0); + + /* Start the device */ + 
TEST_ASSERT_SUCCESS(rte_bbdev_start(null_dev_id), + "Failed to start bbdev %u", 0); + + return TEST_SUCCESS; +} + +static void +ut_teardown(void) +{ + rte_bbdev_close(null_dev_id); +} + +static int +test_bbdev_configure_invalid_dev_id(void) +{ + uint8_t dev_id; + uint8_t num_queues; + + num_queues = 1; + for (dev_id = 0; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++) { + if (!rte_bbdev_is_valid(dev_id)) { + TEST_ASSERT_FAIL(rte_bbdev_setup_queues(dev_id, + num_queues, SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "invalid dev_num %u", dev_id); + TEST_ASSERT(rte_bbdev_intr_enable(dev_id) == -ENODEV, + "Failed test for rte_bbdev_intr_enable: " + "invalid dev_num %u", dev_id); + break; + } + } + + return TEST_SUCCESS; +} + +static int +test_bbdev_configure_invalid_num_queues(void) +{ + struct rte_bbdev_info info; + uint8_t dev_id, num_devs; + uint8_t num_queues; + int return_value; + + TEST_ASSERT((num_devs = rte_bbdev_count()) >= 1, + "Need at least %d devices for test", 1); + + /* valid num_queues values */ + num_queues = 8; + + /* valid dev_id values */ + dev_id = null_dev_id; + + /* Stop the device in case it's started so it can be configured */ + rte_bbdev_stop(dev_id); + + TEST_ASSERT_FAIL(rte_bbdev_setup_queues(dev_id, 0, SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "invalid num_queues %d", 0); + + TEST_ASSERT_SUCCESS(rte_bbdev_setup_queues(dev_id, num_queues, + SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "invalid dev_num %u", dev_id); + + TEST_ASSERT_FAIL(return_value = rte_bbdev_info_get(dev_id, NULL), + "Failed test for rte_bbdev_info_get: " + "returned value:%i", return_value); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_info_get(dev_id, &info), + "Failed test for rte_bbdev_info_get: " + "invalid return value:%i", return_value); + + TEST_ASSERT(info.num_queues == num_queues, + "Failed test for rte_bbdev_info_get: " + "invalid num_queues:%u", info.num_queues); + + num_queues = info.drv.max_num_queues; + 
TEST_ASSERT_SUCCESS(rte_bbdev_setup_queues(dev_id, num_queues, + SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "invalid num_queues: %u", num_queues); + + num_queues++; + TEST_ASSERT_FAIL(rte_bbdev_setup_queues(dev_id, num_queues, + SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "invalid num_queues: %u", num_queues); + + return TEST_SUCCESS; +} + +static int +test_bbdev_configure_stop_device(void) +{ + struct rte_bbdev_info info; + uint8_t dev_id; + int return_value; + + /* valid dev_id values */ + dev_id = null_dev_id; + + /* Stop the device so it can be configured */ + rte_bbdev_stop(dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_info_get(dev_id, &info), + "Failed test for rte_bbdev_info_get: " + "invalid return value from " + "rte_bbdev_info_get function: %i", return_value); + + TEST_ASSERT_SUCCESS(info.started, "Failed test for rte_bbdev_info_get: " + "started value: %u", info.started); + + TEST_ASSERT_SUCCESS(rte_bbdev_setup_queues(dev_id, + info.drv.max_num_queues, SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "device should be stopped, dev_id: %u", dev_id); + + return_value = rte_bbdev_intr_enable(dev_id); + TEST_ASSERT(return_value != -EBUSY, + "Failed test for rte_bbdev_intr_enable: device should be stopped, dev_id: %u", + dev_id); + + /* Start the device so it cannot be configured */ + TEST_ASSERT_FAIL(rte_bbdev_start(RTE_BBDEV_MAX_DEVS), + "Failed to start bbdev %u", dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id), + "Failed to start bbdev %u", dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_info_get(dev_id, &info), + "Failed test for rte_bbdev_info_get: " + "invalid return value from " + "rte_bbdev_info_get function: %i", return_value); + + TEST_ASSERT_FAIL(info.started, "Failed test for rte_bbdev_info_get: " + "started value: %u", info.started); + + TEST_ASSERT_FAIL(rte_bbdev_setup_queues(dev_id, + info.drv.max_num_queues, SOCKET_ID_ANY), + "Failed test for 
rte_bbdev_setup_queues: " + "device should be started, dev_id: %u", dev_id); + + return_value = rte_bbdev_intr_enable(dev_id); + TEST_ASSERT(return_value == -EBUSY, + "Failed test for rte_bbdev_intr_enable: device should be started, dev_id: %u", + dev_id); + + /* Stop again the device so it can be once again configured */ + TEST_ASSERT_FAIL(rte_bbdev_stop(RTE_BBDEV_MAX_DEVS), + "Failed to start bbdev %u", dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_stop(dev_id), "Failed to stop bbdev %u", + dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_info_get(dev_id, &info), + "Failed test for rte_bbdev_info_get: " + "invalid return value from " + "rte_bbdev_info_get function: %i", return_value); + + TEST_ASSERT_SUCCESS(info.started, "Failed test for rte_bbdev_info_get: " + "started value: %u", info.started); + + TEST_ASSERT_SUCCESS(rte_bbdev_setup_queues(dev_id, + info.drv.max_num_queues, SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "device should be stopped, dev_id: %u", dev_id); + + return_value = rte_bbdev_intr_enable(dev_id); + TEST_ASSERT(return_value != -EBUSY, + "Failed test for rte_bbdev_intr_enable: device should be stopped, dev_id: %u", + dev_id); + + return TEST_SUCCESS; +} + +static int +test_bbdev_configure_stop_queue(void) +{ + struct bbdev_testsuite_params *ts_params = &testsuite_params; + struct rte_bbdev_info info; + struct rte_bbdev_queue_info qinfo; + uint8_t dev_id; + uint16_t queue_id; + int return_value; + + /* Valid dev_id values */ + dev_id = null_dev_id; + + /* Valid queue_id values */ + queue_id = 0; + + rte_bbdev_stop(dev_id); + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_info_get(dev_id, &info), + "Failed test for rte_bbdev_info_get: " + "invalid return value:%i", return_value); + + /* Valid queue configuration */ + ts_params->qconf.queue_size = info.drv.queue_size_lim; + ts_params->qconf.priority = info.drv.max_queue_priority; + + /* Device - started; queue - started */ + rte_bbdev_start(dev_id); + + 
TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "queue:%u on device:%u should be stopped", + queue_id, dev_id); + + /* Device - stopped; queue - started */ + rte_bbdev_stop(dev_id); + + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "queue:%u on device:%u should be stopped", + queue_id, dev_id); + + TEST_ASSERT_FAIL(rte_bbdev_queue_stop(RTE_BBDEV_MAX_DEVS, queue_id), + "Failed test for rte_bbdev_queue_stop " + "invalid dev_id "); + + TEST_ASSERT_FAIL(rte_bbdev_queue_stop(dev_id, RTE_MAX_QUEUES_PER_PORT), + "Failed test for rte_bbdev_queue_stop " + "invalid queue_id "); + + /* Device - stopped; queue - stopped */ + rte_bbdev_queue_stop(dev_id, queue_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "queue:%u on device:%u should be stopped", queue_id, + dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_queue_info_get(dev_id, + queue_id, &qinfo), + "Failed test for rte_bbdev_info_get: " + "invalid return value from " + "rte_bbdev_queue_info_get function: %i", return_value); + + TEST_ASSERT(qinfo.conf.socket == ts_params->qconf.socket, + "Failed test for rte_bbdev_queue_info_get: " + "invalid queue_size:%u", qinfo.conf.socket); + + TEST_ASSERT(qinfo.conf.queue_size == ts_params->qconf.queue_size, + "Failed test for rte_bbdev_queue_info_get: " + "invalid queue_size:%u", qinfo.conf.queue_size); + + TEST_ASSERT(qinfo.conf.priority == ts_params->qconf.priority, + "Failed test for rte_bbdev_queue_info_get: " + "invalid queue_size:%u", qinfo.conf.priority); + + TEST_ASSERT(qinfo.conf.deferred_start == + ts_params->qconf.deferred_start, + "Failed test for rte_bbdev_queue_info_get: " + "invalid queue_size:%u", qinfo.conf.deferred_start); + + /* Device - started; queue - stopped */ + rte_bbdev_start(dev_id); 
+ rte_bbdev_queue_stop(dev_id, queue_id); + + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "queue:%u on device:%u should be stopped", queue_id, + dev_id); + + rte_bbdev_stop(dev_id); + + /* After rte_bbdev_start(dev_id): + * - queue should be still stopped if deferred_start == + */ + rte_bbdev_start(dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_queue_info_get(dev_id, + queue_id, &qinfo), + "Failed test for rte_bbdev_info_get: " + "invalid return value from " + "rte_bbdev_queue_info_get function: %i", return_value); + + TEST_ASSERT(qinfo.started == 0, + "Failed test for rte_bbdev_queue_info_get: " + "invalid value for qinfo.started:%u", qinfo.started); + + rte_bbdev_stop(dev_id); + + /* After rte_bbdev_start(dev_id): + * - queue should be started if deferred_start == + */ + ts_params->qconf.deferred_start = 0; + rte_bbdev_queue_configure(dev_id, queue_id, &ts_params->qconf); + rte_bbdev_start(dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_queue_info_get(dev_id, + queue_id, &qinfo), + "Failed test for rte_bbdev_info_get: " + "invalid return value from " + "rte_bbdev_queue_info_get function: %i", return_value); + + TEST_ASSERT(qinfo.started == 1, + "Failed test for rte_bbdev_queue_info_get: " + "invalid value for qinfo.started:%u", qinfo.started); + + return TEST_SUCCESS; +} + +static int +test_bbdev_configure_invalid_queue_configure(void) +{ + struct bbdev_testsuite_params *ts_params = &testsuite_params; + int return_value; + struct rte_bbdev_info info; + uint8_t dev_id; + uint16_t queue_id; + + /* Valid dev_id values */ + dev_id = null_dev_id; + + /* Valid queue_id values */ + queue_id = 0; + + rte_bbdev_stop(dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_info_get(dev_id, &info), + "Failed test for rte_bbdev_info_get: " + "invalid return value:%i", return_value); + + rte_bbdev_queue_stop(dev_id, queue_id); + + ts_params->qconf.queue_size = 
info.drv.queue_size_lim + 1; + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "invalid value qconf.queue_size: %u", + ts_params->qconf.queue_size); + + ts_params->qconf.queue_size = info.drv.queue_size_lim; + ts_params->qconf.priority = info.drv.max_queue_priority + 1; + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "invalid value qconf.queue_size: %u", + ts_params->qconf.queue_size); + + ts_params->qconf.priority = info.drv.max_queue_priority; + queue_id = info.num_queues; + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "invalid value queue_id: %u", queue_id); + + queue_id = 0; + TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, NULL), + "Failed test for rte_bbdev_queue_configure: " + "NULL qconf structure "); + + ts_params->qconf.socket = RTE_MAX_NUMA_NODES; + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "invalid socket number "); + + ts_params->qconf.socket = SOCKET_ID_ANY; + TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "invalid value qconf.queue_size: %u", + ts_params->qconf.queue_size); + + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(RTE_BBDEV_MAX_DEVS, queue_id, + &ts_params->qconf), + "Failed test for rte_bbdev_queue_configure: " + "invalid dev_id"); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, NULL), + "Failed test for rte_bbdev_queue_configure: " + "invalid value qconf.queue_size: %u", + ts_params->qconf.queue_size); + + return TEST_SUCCESS; +} + +static int +test_bbdev_op_pool(void) +{ + struct rte_mempool *mp; + + unsigned int dec_size = sizeof(struct rte_bbdev_dec_op); + unsigned int 
enc_size = sizeof(struct rte_bbdev_enc_op); + + const char *pool_dec = "Test_DEC"; + const char *pool_enc = "Test_ENC"; + + /* Valid pool configuration */ + uint32_t size = 256; + uint32_t cache_size = 128; + + TEST_ASSERT(rte_bbdev_op_pool_create(NULL, + RTE_BBDEV_OP_TURBO_DEC, size, cache_size, 0) == NULL, + "Failed test for rte_bbdev_op_pool_create: " + "NULL name parameter"); + + TEST_ASSERT((mp = rte_bbdev_op_pool_create(pool_dec, + RTE_BBDEV_OP_TURBO_DEC, size, cache_size, 0)) != NULL, + "Failed test for rte_bbdev_op_pool_create: " + "returned value is empty"); + + TEST_ASSERT(mp->size == size, + "Failed test for rte_bbdev_op_pool_create: " + "invalid size of the mempool, mp->size: %u", mp->size); + + TEST_ASSERT(mp->cache_size == cache_size, + "Failed test for rte_bbdev_op_pool_create: " + "invalid size of the mempool, mp->size: %u", + mp->cache_size); + + TEST_ASSERT_SUCCESS(strcmp(mp->name, pool_dec), + "Failed test for rte_bbdev_op_pool_create: " + "invalid name of mempool, mp->name: %s", mp->name); + + TEST_ASSERT(mp->elt_size == dec_size, + "Failed test for rte_bbdev_op_pool_create: " + "invalid element size for RTE_BBDEV_OP_TURBO_DEC, " + "mp->elt_size: %u", mp->elt_size); + + rte_mempool_free(mp); + + TEST_ASSERT((mp = rte_bbdev_op_pool_create(pool_enc, + RTE_BBDEV_OP_TURBO_ENC, size, cache_size, 0)) != NULL, + "Failed test for rte_bbdev_op_pool_create: " + "returned value is empty"); + + TEST_ASSERT(mp->elt_size == enc_size, + "Failed test for rte_bbdev_op_pool_create: " + "invalid element size for RTE_BBDEV_OP_TURBO_ENC, " + "mp->elt_size: %u", mp->elt_size); + + rte_mempool_free(mp); + + TEST_ASSERT((mp = rte_bbdev_op_pool_create("Test_NONE", + RTE_BBDEV_OP_NONE, size, cache_size, 0)) != NULL, + "Failed test for rte_bbdev_op_pool_create: " + "returned value is empty for RTE_BBDEV_OP_NONE"); + + TEST_ASSERT(mp->elt_size == (enc_size > dec_size ? 
enc_size : dec_size), + "Failed test for rte_bbdev_op_pool_create: " + "invalid size for RTE_BBDEV_OP_NONE, mp->elt_size: %u", + mp->elt_size); + + rte_mempool_free(mp); + + TEST_ASSERT((mp = rte_bbdev_op_pool_create("Test_INV", + RTE_BBDEV_OP_TYPE_COUNT, size, cache_size, 0)) == NULL, + "Failed test for rte_bbdev_op_pool_create: " + "returned value is not NULL for invalid type"); + + /* Invalid pool configuration */ + size = 128; + cache_size = 256; + + TEST_ASSERT((mp = rte_bbdev_op_pool_create("Test_InvSize", + RTE_BBDEV_OP_NONE, size, cache_size, 0)) == NULL, + "Failed test for rte_bbdev_op_pool_create: " + "returned value should be empty " + "because size of per-lcore local cache " + "is greater than size of the mempool."); + + return TEST_SUCCESS; +} + +/** + * Create pool of OP types RTE_BBDEV_OP_NONE, RTE_BBDEV_OP_TURBO_DEC and + * RTE_BBDEV_OP_TURBO_ENC and check that only ops of that type can be + * allocated + */ +static int +test_bbdev_op_type(void) +{ + struct rte_mempool *mp_dec; + + const unsigned int OPS_COUNT = 32; + struct rte_bbdev_dec_op *dec_ops_arr[OPS_COUNT]; + struct rte_bbdev_enc_op *enc_ops_arr[OPS_COUNT]; + + const char *pool_dec = "Test_op_dec"; + + /* Valid pool configuration */ + uint32_t num_elements = 256; + uint32_t cache_size = 128; + + /* mempool type : RTE_BBDEV_OP_TURBO_DEC */ + mp_dec = rte_bbdev_op_pool_create(pool_dec, + RTE_BBDEV_OP_TURBO_DEC, num_elements, cache_size, 0); + TEST_ASSERT(mp_dec != NULL, "Failed to create %s mempool", pool_dec); + + TEST_ASSERT(rte_bbdev_dec_op_alloc_bulk(mp_dec, dec_ops_arr, 1) == 0, + "Failed test for rte_bbdev_op_alloc_bulk TURBO_DEC: " + "OPs type: RTE_BBDEV_OP_TURBO_DEC"); + + TEST_ASSERT(rte_bbdev_enc_op_alloc_bulk(mp_dec, enc_ops_arr, 1) != 0, + "Failed test for rte_bbdev_op_alloc_bulk TURBO_DEC: " + "OPs type: RTE_BBDEV_OP_TURBO_ENC"); + + rte_mempool_free(mp_dec); + + return TEST_SUCCESS; +} + +static int +test_bbdev_op_pool_size(void) +{ + struct rte_mempool *mp_none; + + const 
unsigned int OPS_COUNT = 128; + struct rte_bbdev_enc_op *ops_enc_arr[OPS_COUNT]; + struct rte_bbdev_enc_op *ops_ext_arr[OPS_COUNT]; + struct rte_bbdev_enc_op *ops_ext2_arr[OPS_COUNT]; + + const char *pool_none = "Test_pool_size"; + + /* Valid pool configuration */ + uint32_t num_elements = 256; + uint32_t cache_size = 0; + + /* Create mempool type : RTE_BBDEV_OP_TURBO_ENC, size : 256 */ + mp_none = rte_bbdev_op_pool_create(pool_none, RTE_BBDEV_OP_TURBO_ENC, + num_elements, cache_size, 0); + TEST_ASSERT(mp_none != NULL, "Failed to create %s mempool", pool_none); + + /* Add 128 RTE_BBDEV_OP_TURBO_ENC ops */ + rte_bbdev_enc_op_alloc_bulk(mp_none, ops_enc_arr, OPS_COUNT); + + /* Add 128 RTE_BBDEV_OP_TURBO_ENC ops */ + TEST_ASSERT(rte_bbdev_enc_op_alloc_bulk(mp_none, ops_ext_arr, + OPS_COUNT) == 0, + "Failed test for allocating bbdev ops: " + "Mempool size: 256, Free : 128, Attempted to add: 128"); + + /* Try adding 128 more RTE_BBDEV_OP_TURBO_ENC ops, this should fail */ + TEST_ASSERT(rte_bbdev_enc_op_alloc_bulk(mp_none, ops_ext2_arr, + OPS_COUNT) != 0, + "Failed test for allocating bbdev ops: " + "Mempool size: 256, Free : 0, Attempted to add: 128"); + + /* Free-up 128 RTE_BBDEV_OP_TURBO_ENC ops */ + rte_bbdev_enc_op_free_bulk(ops_enc_arr, OPS_COUNT); + + /* Try adding 128 RTE_BBDEV_OP_TURBO_DEC ops, this should succeed */ + /* Cache size > 0 causes reallocation of ops size > 127 fail */ + TEST_ASSERT(rte_bbdev_enc_op_alloc_bulk(mp_none, ops_ext2_arr, + OPS_COUNT) == 0, + "Failed test for allocating ops after mempool freed: " + "Mempool size: 256, Free : 128, Attempted to add: 128"); + + rte_mempool_free(mp_none); + + return TEST_SUCCESS; +} + +static int +test_bbdev_count(void) +{ + uint8_t num_devs, num_valid_devs = 0; + + for (num_devs = 0; num_devs < RTE_BBDEV_MAX_DEVS; num_devs++) { + if (rte_bbdev_is_valid(num_devs)) + num_valid_devs++; + } + + num_devs = rte_bbdev_count(); + TEST_ASSERT(num_valid_devs == num_devs, + "Failed test for rte_bbdev_is_valid: " + 
"invalid num_devs %u ", num_devs); + + return TEST_SUCCESS; +} + +static int +test_bbdev_stats(void) +{ + uint8_t dev_id = null_dev_id; + uint16_t queue_id = 0; + struct rte_bbdev_dec_op *dec_ops[4096] = { 0 }; + struct rte_bbdev_dec_op *dec_proc_ops[4096] = { 0 }; + struct rte_bbdev_enc_op *enc_ops[4096] = { 0 }; + struct rte_bbdev_enc_op *enc_proc_ops[4096] = { 0 }; + uint16_t num_ops = 236; + struct rte_bbdev_stats stats; + struct bbdev_testsuite_params *ts_params = &testsuite_params; + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_stop(dev_id, queue_id), + "Failed to stop queue %u on device %u ", queue_id, + dev_id); + TEST_ASSERT_SUCCESS(rte_bbdev_stop(dev_id), + "Failed to stop bbdev %u ", dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed to configure queue %u on device %u ", + queue_id, dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id), + "Failed to start bbdev %u ", dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_start(dev_id, queue_id), + "Failed to start queue %u on device %u ", queue_id, + dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_start(dev_id, queue_id), + "Failed to start queue %u on device %u ", queue_id, + dev_id); + + /* Tests after enqueue operation */ + rte_bbdev_enqueue_enc_ops(dev_id, queue_id, enc_ops, num_ops); + rte_bbdev_enqueue_dec_ops(dev_id, queue_id, dec_ops, num_ops); + + TEST_ASSERT_FAIL(rte_bbdev_stats_get(RTE_BBDEV_MAX_DEVS, &stats), + "Failed test for rte_bbdev_stats_get on device %u ", + dev_id); + + TEST_ASSERT_FAIL(rte_bbdev_stats_get(dev_id, NULL), + "Failed test for rte_bbdev_stats_get on device %u ", + dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_stats_get(dev_id, &stats), + "Failed test for rte_bbdev_stats_get on device %u ", + dev_id); + + TEST_ASSERT(stats.enqueued_count == 2 * num_ops, + "Failed test for rte_bbdev_enqueue_ops: " + "invalid enqueued_count %" PRIu64 " ", + stats.enqueued_count); + + TEST_ASSERT(stats.dequeued_count == 0, + "Failed test for 
rte_bbdev_stats_reset: " + "invalid dequeued_count %" PRIu64 " ", + stats.dequeued_count); + + /* Tests after dequeue operation */ + rte_bbdev_dequeue_enc_ops(dev_id, queue_id, enc_proc_ops, num_ops); + rte_bbdev_dequeue_dec_ops(dev_id, queue_id, dec_proc_ops, num_ops); + + TEST_ASSERT_SUCCESS(rte_bbdev_stats_get(dev_id, &stats), + "Failed test for rte_bbdev_stats_get on device %u ", + dev_id); + + TEST_ASSERT(stats.dequeued_count == 2 * num_ops, + "Failed test for rte_bbdev_dequeue_ops: " + "invalid enqueued_count %" PRIu64 " ", + stats.dequeued_count); + + TEST_ASSERT(stats.enqueue_err_count == 0, + "Failed test for rte_bbdev_stats_reset: " + "invalid enqueue_err_count %" PRIu64 " ", + stats.enqueue_err_count); + + TEST_ASSERT(stats.dequeue_err_count == 0, + "Failed test for rte_bbdev_stats_reset: " + "invalid dequeue_err_count %" PRIu64 " ", + stats.dequeue_err_count); + + /* Tests after reset operation */ + TEST_ASSERT_FAIL(rte_bbdev_stats_reset(RTE_BBDEV_MAX_DEVS), + "Failed to reset statistic for device %u ", dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id), + "Failed to reset statistic for device %u ", dev_id); + TEST_ASSERT_SUCCESS(rte_bbdev_stats_get(dev_id, &stats), + "Failed test for rte_bbdev_stats_get on device %u ", + dev_id); + + TEST_ASSERT(stats.enqueued_count == 0, + "Failed test for rte_bbdev_stats_reset: " + "invalid enqueued_count %" PRIu64 " ", + stats.enqueued_count); + + TEST_ASSERT(stats.dequeued_count == 0, + "Failed test for rte_bbdev_stats_reset: " + "invalid dequeued_count %" PRIu64 " ", + stats.dequeued_count); + + TEST_ASSERT(stats.enqueue_err_count == 0, + "Failed test for rte_bbdev_stats_reset: " + "invalid enqueue_err_count %" PRIu64 " ", + stats.enqueue_err_count); + + TEST_ASSERT(stats.dequeue_err_count == 0, + "Failed test for rte_bbdev_stats_reset: " + "invalid dequeue_err_count %" PRIu64 " ", + stats.dequeue_err_count); + + return TEST_SUCCESS; +} + +static int +test_bbdev_driver_init(void) +{ + struct rte_bbdev 
*dev1, *dev2; + const char *name = "dev_name"; + char name_tmp[16]; + int num_devs, num_devs_tmp; + + dev1 = rte_bbdev_allocate(NULL); + TEST_ASSERT(dev1 == NULL, + "Failed initialize bbdev driver with NULL name"); + + dev1 = rte_bbdev_allocate(name); + TEST_ASSERT(dev1 != NULL, "Failed to initialize bbdev driver"); + + dev2 = rte_bbdev_allocate(name); + TEST_ASSERT(dev2 == NULL, + "Failed to initialize bbdev driver: " + "driver with the same name has been initialized before"); + + num_devs = rte_bbdev_count() - 1; + num_devs_tmp = num_devs; + + /* Initialize the maximum amount of devices */ + do { + sprintf(name_tmp, "%s%i", "name_", num_devs); + dev2 = rte_bbdev_allocate(name_tmp); + TEST_ASSERT(dev2 != NULL, + "Failed to initialize bbdev driver"); + ++num_devs; + } while (num_devs < (RTE_BBDEV_MAX_DEVS - 1)); + + sprintf(name_tmp, "%s%i", "name_", num_devs); + dev2 = rte_bbdev_allocate(name_tmp); + TEST_ASSERT(dev2 == NULL, "Failed to initialize bbdev driver number %d " + "more drivers than RTE_BBDEV_MAX_DEVS: %d ", num_devs, + RTE_BBDEV_MAX_DEVS); + + num_devs--; + + while (num_devs >= num_devs_tmp) { + sprintf(name_tmp, "%s%i", "name_", num_devs); + dev2 = rte_bbdev_get_named_dev(name_tmp); + TEST_ASSERT_SUCCESS(rte_bbdev_release(dev2), + "Failed to uninitialize bbdev driver %s ", + name_tmp); + num_devs--; + } + + TEST_ASSERT(dev1->data->dev_id < RTE_BBDEV_MAX_DEVS, + "Failed test rte_bbdev_allocate: " + "invalid dev_id %" PRIu8 ", max number of devices %d ", + dev1->data->dev_id, RTE_BBDEV_MAX_DEVS); + + TEST_ASSERT(dev1->state == RTE_BBDEV_INITIALIZED, + "Failed test rte_bbdev_allocate: " + "invalid state %d (0 - RTE_BBDEV_UNUSED, 1 - RTE_BBDEV_INITIALIZED", + dev1->state); + + TEST_ASSERT_FAIL(rte_bbdev_release(NULL), + "Failed to uninitialize bbdev driver with NULL bbdev"); + + sprintf(name_tmp, "%s", "invalid_name"); + dev2 = rte_bbdev_get_named_dev(name_tmp); + TEST_ASSERT_FAIL(rte_bbdev_release(dev2), + "Failed to uninitialize bbdev driver with invalid 
name"); + + dev2 = rte_bbdev_get_named_dev(name); + TEST_ASSERT_SUCCESS(rte_bbdev_release(dev2), + "Failed to uninitialize bbdev driver: %s ", name); + + return TEST_SUCCESS; +} + +static void +event_callback(uint16_t dev_id, enum rte_bbdev_event_type type, void *param, + void *ret_param) +{ + RTE_SET_USED(dev_id); + RTE_SET_USED(ret_param); + + if (param == NULL) + return; + + if (type == RTE_BBDEV_EVENT_UNKNOWN || + type == RTE_BBDEV_EVENT_ERROR || + type == RTE_BBDEV_EVENT_MAX) + *(int *)param = type; +} + +static int +test_bbdev_callback(void) +{ + struct rte_bbdev *dev1, *dev2; + const char *name = "dev_name1"; + const char *name2 = "dev_name2"; + int event_status; + uint8_t invalid_dev_id = RTE_BBDEV_MAX_DEVS; + enum rte_bbdev_event_type invalid_event_type = RTE_BBDEV_EVENT_MAX; + uint8_t dev_id; + + dev1 = rte_bbdev_allocate(name); + TEST_ASSERT(dev1 != NULL, "Failed to initialize bbdev driver"); + + /* + * RTE_BBDEV_EVENT_UNKNOWN - unregistered + * RTE_BBDEV_EVENT_ERROR - unregistered + */ + event_status = -1; + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == -1, + "Failed test for rte_bbdev_pmd_callback_process: " + "events were not registered "); + + TEST_ASSERT_FAIL(rte_bbdev_callback_register(dev1->data->dev_id, + RTE_BBDEV_EVENT_MAX, event_callback, NULL), + "Failed to callback register for RTE_BBDEV_EVENT_MAX "); + + TEST_ASSERT_FAIL(rte_bbdev_callback_unregister(dev1->data->dev_id, + RTE_BBDEV_EVENT_MAX, event_callback, NULL), + "Failed to unregister RTE_BBDEV_EVENT_MAX "); + + /* + * RTE_BBDEV_EVENT_UNKNOWN - registered + * RTE_BBDEV_EVENT_ERROR - unregistered + */ + TEST_ASSERT_SUCCESS(rte_bbdev_callback_register(dev1->data->dev_id, + RTE_BBDEV_EVENT_UNKNOWN, event_callback, &event_status), + "Failed to callback rgstr for RTE_BBDEV_EVENT_UNKNOWN"); + + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); + 
TEST_ASSERT(event_status == 0, + "Failed test for rte_bbdev_pmd_callback_process " + "for RTE_BBDEV_EVENT_UNKNOWN "); + + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == 0, + "Failed test for rte_bbdev_pmd_callback_process: " + "event RTE_BBDEV_EVENT_ERROR was not registered "); + + /* + * RTE_BBDEV_EVENT_UNKNOWN - registered + * RTE_BBDEV_EVENT_ERROR - registered + */ + TEST_ASSERT_SUCCESS(rte_bbdev_callback_register(dev1->data->dev_id, + RTE_BBDEV_EVENT_ERROR, event_callback, &event_status), + "Failed to callback rgstr for RTE_BBDEV_EVENT_ERROR "); + + TEST_ASSERT_SUCCESS(rte_bbdev_callback_register(dev1->data->dev_id, + RTE_BBDEV_EVENT_ERROR, event_callback, &event_status), + "Failed to callback register for RTE_BBDEV_EVENT_ERROR" + "(re-registration) "); + + event_status = -1; + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); + TEST_ASSERT(event_status == 0, + "Failed test for rte_bbdev_pmd_callback_process " + "for RTE_BBDEV_EVENT_UNKNOWN "); + + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == 1, + "Failed test for rte_bbdev_pmd_callback_process " + "for RTE_BBDEV_EVENT_ERROR "); + + /* + * RTE_BBDEV_EVENT_UNKNOWN - registered + * RTE_BBDEV_EVENT_ERROR - unregistered + */ + TEST_ASSERT_SUCCESS(rte_bbdev_callback_unregister(dev1->data->dev_id, + RTE_BBDEV_EVENT_ERROR, event_callback, &event_status), + "Failed to unregister RTE_BBDEV_EVENT_ERROR "); + + event_status = -1; + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); + TEST_ASSERT(event_status == 0, + "Failed test for rte_bbdev_pmd_callback_process " + "for RTE_BBDEV_EVENT_UNKNOWN "); + + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == 0, + "Failed test for rte_bbdev_pmd_callback_process: " + "event RTE_BBDEV_EVENT_ERROR was unregistered "); + + /* rte_bbdev_callback_register with invalid inputs */ + 
TEST_ASSERT_FAIL(rte_bbdev_callback_register(invalid_dev_id, + RTE_BBDEV_EVENT_ERROR, event_callback, &event_status), + "Failed test for rte_bbdev_callback_register " + "for invalid_dev_id "); + + TEST_ASSERT_FAIL(rte_bbdev_callback_register(dev1->data->dev_id, + invalid_event_type, event_callback, &event_status), + "Failed to callback register for invalid event type "); + + TEST_ASSERT_FAIL(rte_bbdev_callback_register(dev1->data->dev_id, + RTE_BBDEV_EVENT_ERROR, NULL, &event_status), + "Failed to callback register - no callback function "); + + /* The impact of devices on each other */ + dev2 = rte_bbdev_allocate(name2); + TEST_ASSERT(dev2 != NULL, + "Failed to initialize bbdev driver"); + + /* + * dev2: + * RTE_BBDEV_EVENT_UNKNOWN - unregistered + * RTE_BBDEV_EVENT_ERROR - unregistered + */ + event_status = -1; + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_UNKNOWN, NULL); + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == -1, + "Failed test for rte_bbdev_pmd_callback_process: " + "events were not registered "); + + /* + * dev1: RTE_BBDEV_EVENT_ERROR - unregistered + * dev2: RTE_BBDEV_EVENT_ERROR - registered + */ + TEST_ASSERT_SUCCESS(rte_bbdev_callback_register(dev2->data->dev_id, + RTE_BBDEV_EVENT_ERROR, event_callback, &event_status), + "Failed to callback rgstr for RTE_BBDEV_EVENT_ERROR"); + + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == -1, + "Failed test for rte_bbdev_pmd_callback_process in dev1 " + "for RTE_BBDEV_EVENT_ERROR "); + + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == 1, + "Failed test for rte_bbdev_pmd_callback_process in dev2 " + "for RTE_BBDEV_EVENT_ERROR "); + + /* + * dev1: RTE_BBDEV_EVENT_UNKNOWN - registered + * dev2: RTE_BBDEV_EVENT_UNKNOWN - unregistered + */ + TEST_ASSERT_SUCCESS(rte_bbdev_callback_register(dev2->data->dev_id, + RTE_BBDEV_EVENT_UNKNOWN, event_callback, 
&event_status), + "Failed to callback register for RTE_BBDEV_EVENT_UNKNOWN " + "in dev 2 "); + + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_UNKNOWN, NULL); + TEST_ASSERT(event_status == 0, + "Failed test for rte_bbdev_pmd_callback_process in dev2" + " for RTE_BBDEV_EVENT_UNKNOWN "); + + TEST_ASSERT_SUCCESS(rte_bbdev_callback_unregister(dev2->data->dev_id, + RTE_BBDEV_EVENT_UNKNOWN, event_callback, &event_status), + "Failed to unregister RTE_BBDEV_EVENT_UNKNOWN "); + + TEST_ASSERT_SUCCESS(rte_bbdev_callback_unregister(dev2->data->dev_id, + RTE_BBDEV_EVENT_UNKNOWN, event_callback, &event_status), + "Failed to unregister RTE_BBDEV_EVENT_UNKNOWN : " + "unregister function called once again "); + + event_status = -1; + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_UNKNOWN, NULL); + TEST_ASSERT(event_status == -1, + "Failed test for rte_bbdev_pmd_callback_process in dev2" + " for RTE_BBDEV_EVENT_UNKNOWN "); + + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); + TEST_ASSERT(event_status == 0, + "Failed test for rte_bbdev_pmd_callback_process in dev2 " + "for RTE_BBDEV_EVENT_UNKNOWN "); + + /* rte_bbdev_pmd_callback_process with invalid inputs */ + rte_bbdev_pmd_callback_process(NULL, RTE_BBDEV_EVENT_UNKNOWN, NULL); + + event_status = -1; + rte_bbdev_pmd_callback_process(dev1, invalid_event_type, NULL); + TEST_ASSERT(event_status == -1, + "Failed test for rte_bbdev_pmd_callback_process: " + "for invalid event type "); + + /* rte_dev_callback_unregister with invalid inputs */ + TEST_ASSERT_FAIL(rte_bbdev_callback_unregister(invalid_dev_id, + RTE_BBDEV_EVENT_UNKNOWN, event_callback, &event_status), + "Failed test for rte_dev_callback_unregister " + "for invalid_dev_id "); + + TEST_ASSERT_FAIL(rte_bbdev_callback_unregister(dev1->data->dev_id, + invalid_event_type, event_callback, &event_status), + "Failed rte_dev_callback_unregister " + "for invalid event type "); + + TEST_ASSERT_FAIL(rte_bbdev_callback_unregister(dev1->data->dev_id, + 
invalid_event_type, NULL, &event_status), + "Failed rte_dev_callback_unregister " + "when no callback function "); + + dev_id = dev1->data->dev_id; + + rte_bbdev_release(dev1); + rte_bbdev_release(dev2); + + TEST_ASSERT_FAIL(rte_bbdev_callback_register(dev_id, + RTE_BBDEV_EVENT_ERROR, event_callback, &event_status), + "Failed test for rte_bbdev_callback_register: " + "function called after rte_bbdev_driver_uninit ."); + + TEST_ASSERT_FAIL(rte_bbdev_callback_unregister(dev_id, + RTE_BBDEV_EVENT_ERROR, event_callback, &event_status), + "Failed test for rte_dev_callback_unregister: " + "function called after rte_bbdev_driver_uninit. "); + + event_status = -1; + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL); + rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL); + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_UNKNOWN, NULL); + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_ERROR, NULL); + TEST_ASSERT(event_status == -1, + "Failed test for rte_bbdev_pmd_callback_process: " + "callback function was called after rte_bbdev_driver_uninit"); + + return TEST_SUCCESS; +} + +static int +test_bbdev_invalid_driver(void) +{ + struct rte_bbdev dev1, *dev2; + uint8_t dev_id = null_dev_id; + uint16_t queue_id = 0; + struct rte_bbdev_stats stats; + struct bbdev_testsuite_params *ts_params = &testsuite_params; + struct rte_bbdev_queue_info qinfo; + struct rte_bbdev_ops dev_ops_tmp; + + TEST_ASSERT_SUCCESS(rte_bbdev_stop(dev_id), "Failed to stop bbdev %u ", + dev_id); + + dev1 = rte_bbdev_devices[dev_id]; + dev2 = &rte_bbdev_devices[dev_id]; + + /* Tests for rte_bbdev_setup_queues */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_setup_queues(dev_id, 1, SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "NULL dev_ops structure "); + dev2->dev_ops = dev1.dev_ops; + + dev_ops_tmp = *dev2->dev_ops; + dev_ops_tmp.info_get = NULL; + dev2->dev_ops = &dev_ops_tmp; + TEST_ASSERT_FAIL(rte_bbdev_setup_queues(dev_id, 1, 
SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "NULL info_get "); + dev2->dev_ops = dev1.dev_ops; + + dev_ops_tmp = *dev2->dev_ops; + dev_ops_tmp.queue_release = NULL; + dev2->dev_ops = &dev_ops_tmp; + TEST_ASSERT_FAIL(rte_bbdev_setup_queues(dev_id, 1, SOCKET_ID_ANY), + "Failed test for rte_bbdev_setup_queues: " + "NULL queue_release "); + dev2->dev_ops = dev1.dev_ops; + + dev2->data->socket_id = SOCKET_ID_ANY; + TEST_ASSERT_SUCCESS(rte_bbdev_setup_queues(dev_id, 1, + SOCKET_ID_ANY), "Failed to configure bbdev %u", dev_id); + + /* Test for rte_bbdev_queue_configure */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed to configure queue %u on device %u " + "with NULL dev_ops structure ", queue_id, dev_id); + dev2->dev_ops = dev1.dev_ops; + + dev_ops_tmp = *dev2->dev_ops; + dev_ops_tmp.queue_setup = NULL; + dev2->dev_ops = &dev_ops_tmp; + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed to configure queue %u on device %u " + "with NULL queue_setup ", queue_id, dev_id); + dev2->dev_ops = dev1.dev_ops; + + dev_ops_tmp = *dev2->dev_ops; + dev_ops_tmp.info_get = NULL; + dev2->dev_ops = &dev_ops_tmp; + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed to configure queue %u on device %u " + "with NULL info_get ", queue_id, dev_id); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_FAIL(rte_bbdev_queue_configure(RTE_BBDEV_MAX_DEVS, + queue_id, &ts_params->qconf), + "Failed to configure queue %u on device %u ", + queue_id, dev_id); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, + &ts_params->qconf), + "Failed to configure queue %u on device %u ", + queue_id, dev_id); + + /* Test for rte_bbdev_queue_info_get */ + dev2->dev_ops = NULL; + TEST_ASSERT_SUCCESS(rte_bbdev_queue_info_get(dev_id, queue_id, &qinfo), + "Failed test for rte_bbdev_info_get: " + "NULL dev_ops structure "); + 
dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_FAIL(rte_bbdev_queue_info_get(RTE_BBDEV_MAX_DEVS, + queue_id, &qinfo), + "Failed test for rte_bbdev_info_get: " + "invalid dev_id "); + + TEST_ASSERT_FAIL(rte_bbdev_queue_info_get(dev_id, + RTE_MAX_QUEUES_PER_PORT, &qinfo), + "Failed test for rte_bbdev_info_get: " + "invalid queue_id "); + + TEST_ASSERT_FAIL(rte_bbdev_queue_info_get(dev_id, queue_id, NULL), + "Failed test for rte_bbdev_info_get: " + "invalid dev_info "); + + /* Test for rte_bbdev_start */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_start(dev_id), + "Failed to start bbdev %u " + "with NULL dev_ops structure ", dev_id); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id), + "Failed to start bbdev %u ", dev_id); + + /* Test for rte_bbdev_queue_start */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_queue_start(dev_id, queue_id), + "Failed to start queue %u on device %u: " + "NULL dev_ops structure", queue_id, dev_id); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_start(dev_id, queue_id), + "Failed to start queue %u on device %u ", queue_id, + dev_id); + + /* Tests for rte_bbdev_stats_get */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_stats_get(dev_id, &stats), + "Failed test for rte_bbdev_stats_get on device %u ", + dev_id); + dev2->dev_ops = dev1.dev_ops; + + dev_ops_tmp = *dev2->dev_ops; + dev_ops_tmp.stats_reset = NULL; + dev2->dev_ops = &dev_ops_tmp; + TEST_ASSERT_SUCCESS(rte_bbdev_stats_get(dev_id, &stats), + "Failed test for rte_bbdev_stats_get: " + "NULL stats_get "); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_SUCCESS(rte_bbdev_stats_get(dev_id, &stats), + "Failed test for rte_bbdev_stats_get on device %u ", + dev_id); + + /* + * Tests for: + * rte_bbdev_callback_register, + * rte_bbdev_pmd_callback_process, + * rte_dev_callback_unregister + */ + dev2->dev_ops = NULL; + TEST_ASSERT_SUCCESS(rte_bbdev_callback_register(dev_id, + RTE_BBDEV_EVENT_UNKNOWN, event_callback, 
NULL), + "Failed to callback rgstr for RTE_BBDEV_EVENT_UNKNOWN"); + rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_UNKNOWN, NULL); + + TEST_ASSERT_SUCCESS(rte_bbdev_callback_unregister(dev_id, + RTE_BBDEV_EVENT_UNKNOWN, event_callback, NULL), + "Failed to unregister RTE_BBDEV_EVENT_ERROR "); + dev2->dev_ops = dev1.dev_ops; + + /* Tests for rte_bbdev_stats_reset */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_stats_reset(dev_id), + "Failed to reset statistic for device %u ", dev_id); + dev2->dev_ops = dev1.dev_ops; + + dev_ops_tmp = *dev2->dev_ops; + dev_ops_tmp.stats_reset = NULL; + dev2->dev_ops = &dev_ops_tmp; + TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id), + "Failed test for rte_bbdev_stats_reset: " + "NULL stats_reset "); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id), + "Failed to reset statistic for device %u ", dev_id); + + /* Tests for rte_bbdev_queue_stop */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_queue_stop(dev_id, queue_id), + "Failed to stop queue %u on device %u: " + "NULL dev_ops structure", queue_id, dev_id); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_stop(dev_id, queue_id), + "Failed to stop queue %u on device %u ", queue_id, + dev_id); + + /* Tests for rte_bbdev_stop */ + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_stop(dev_id), + "Failed to stop bbdev %u with NULL dev_ops structure ", + dev_id); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_SUCCESS(rte_bbdev_stop(dev_id), + "Failed to stop bbdev %u ", dev_id); + + /* Tests for rte_bbdev_close */ + TEST_ASSERT_FAIL(rte_bbdev_close(RTE_BBDEV_MAX_DEVS), + "Failed to close bbdev with invalid dev_id"); + + dev2->dev_ops = NULL; + TEST_ASSERT_FAIL(rte_bbdev_close(dev_id), + "Failed to close bbdev %u with NULL dev_ops structure ", + dev_id); + dev2->dev_ops = dev1.dev_ops; + + TEST_ASSERT_SUCCESS(rte_bbdev_close(dev_id), + "Failed to close bbdev %u ", dev_id); + + return TEST_SUCCESS; +} + +static 
int
test_bbdev_get_named_dev(void)
{
	struct rte_bbdev *dev, *dev_tmp;
	const char *name = "name";

	/* Allocate a device so there is something to look up by name. */
	dev = rte_bbdev_allocate(name);
	TEST_ASSERT(dev != NULL, "Failed to initialize bbdev driver");

	/* A NULL name must not match any device. */
	dev_tmp = rte_bbdev_get_named_dev(NULL);
	TEST_ASSERT(dev_tmp == NULL, "Failed test for rte_bbdev_get_named_dev: "
			"function called with NULL parameter");

	dev_tmp = rte_bbdev_get_named_dev(name);

	/* Lookup by the exact name must return the device allocated above. */
	TEST_ASSERT(dev == dev_tmp, "Failed test for rte_bbdev_get_named_dev: "
			"wrong device was returned ");

	TEST_ASSERT_SUCCESS(rte_bbdev_release(dev),
			"Failed to uninitialize bbdev driver %s ", name);

	return TEST_SUCCESS;
}

/* Test suite exercising the bbdev API against the NULL (dummy) device. */
static struct unit_test_suite bbdev_null_testsuite = {
	.suite_name = "BBDEV NULL Unit Test Suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {

		TEST_CASE(test_bbdev_configure_invalid_dev_id),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_configure_invalid_num_queues),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_configure_stop_device),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_configure_stop_queue),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_configure_invalid_queue_configure),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_op_pool),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_op_type),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_op_pool_size),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_stats),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_driver_init),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_callback),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_invalid_driver),

		TEST_CASE_ST(ut_setup, ut_teardown,
				test_bbdev_get_named_dev),

		TEST_CASE(test_bbdev_count),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

REGISTER_TEST_COMMAND(unittest, bbdev_null_testsuite);
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c new file mode
100644 index 00000000..00f3b085 --- /dev/null +++ b/app/test-bbdev/test_bbdev_perf.c @@ -0,0 +1,2138 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_launch.h>
#include <rte_bbdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hexdump.h>

#include "main.h"
#include "test_bbdev_vector.h"

/* SOCKET_ID_ANY is folded to socket 0 when indexing per-socket buffers. */
#define GET_SOCKET(socket_id) (((socket_id) == SOCKET_ID_ANY) ? 0 : (socket_id))

#define MAX_QUEUES RTE_MAX_LCORE

#define OPS_CACHE_SIZE 256U
#define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */

/* Values used with the per-test lcore synchronisation flag (sync below). */
#define SYNC_WAIT 0
#define SYNC_START 1

#define INVALID_QUEUE_ID -1

/* Test vector parsed from the file named on the command line. */
static struct test_bbdev_vector test_vector;

/* Switch between PMD and Interrupt for throughput TC */
static bool intr_enabled;

/* Represents tested active devices */
static struct active_device {
	const char *driver_name;
	uint8_t dev_id;
	uint16_t supported_ops;		/* bitmask indexed by rte_bbdev_op_type */
	uint16_t queue_ids[MAX_QUEUES];
	uint16_t nb_queues;
	struct rte_mempool *ops_mempool;
	struct rte_mempool *in_mbuf_pool;
	struct rte_mempool *hard_out_mbuf_pool;
	struct rte_mempool *soft_out_mbuf_pool;
} active_devs[RTE_BBDEV_MAX_DEVS];

static uint8_t nb_active_devs;

/* Data buffers used by BBDEV ops */
struct test_buffers {
	struct rte_bbdev_op_data *inputs;
	struct rte_bbdev_op_data *hard_outputs;
	struct rte_bbdev_op_data *soft_outputs;
};

/* Operation parameters specific for given test case */
struct test_op_params {
	struct rte_mempool *mp;
	struct rte_bbdev_dec_op *ref_dec_op;
	struct rte_bbdev_enc_op *ref_enc_op;
	uint16_t burst_sz;
	uint16_t num_to_process;
	uint16_t num_lcores;
	int vector_mask;
	rte_atomic16_t sync;	/* SYNC_WAIT/SYNC_START gate for worker lcores */
	struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};

/* Contains per lcore params */
struct thread_params {
	uint8_t
dev_id; + uint16_t queue_id; + uint64_t start_time; + double mops; + double mbps; + rte_atomic16_t nb_dequeued; + rte_atomic16_t processing_status; + struct test_op_params *op_params; +}; + +typedef int (test_case_function)(struct active_device *ad, + struct test_op_params *op_params); + +static inline void +set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type) +{ + ad->supported_ops |= (1 << op_type); +} + +static inline bool +is_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type) +{ + return ad->supported_ops & (1 << op_type); +} + +static inline bool +flags_match(uint32_t flags_req, uint32_t flags_present) +{ + return (flags_req & flags_present) == flags_req; +} + +static void +clear_soft_out_cap(uint32_t *op_flags) +{ + *op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT; + *op_flags &= ~RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT; + *op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT; +} + +static int +check_dev_cap(const struct rte_bbdev_info *dev_info) +{ + unsigned int i; + unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs; + const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities; + + nb_inputs = test_vector.entries[DATA_INPUT].nb_segments; + nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments; + nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments; + + for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) { + if (op_cap->type != test_vector.op_type) + continue; + + if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) { + const struct rte_bbdev_op_cap_turbo_dec *cap = + &op_cap->cap.turbo_dec; + /* Ignore lack of soft output capability, just skip + * checking if soft output is valid. 
+ */ + if ((test_vector.turbo_dec.op_flags & + RTE_BBDEV_TURBO_SOFT_OUTPUT) && + !(cap->capability_flags & + RTE_BBDEV_TURBO_SOFT_OUTPUT)) { + printf( + "WARNING: Device \"%s\" does not support soft output - soft output flags will be ignored.\n", + dev_info->dev_name); + clear_soft_out_cap( + &test_vector.turbo_dec.op_flags); + } + + if (!flags_match(test_vector.turbo_dec.op_flags, + cap->capability_flags)) + return TEST_FAILED; + if (nb_inputs > cap->num_buffers_src) { + printf("Too many inputs defined: %u, max: %u\n", + nb_inputs, cap->num_buffers_src); + return TEST_FAILED; + } + if (nb_soft_outputs > cap->num_buffers_soft_out && + (test_vector.turbo_dec.op_flags & + RTE_BBDEV_TURBO_SOFT_OUTPUT)) { + printf( + "Too many soft outputs defined: %u, max: %u\n", + nb_soft_outputs, + cap->num_buffers_soft_out); + return TEST_FAILED; + } + if (nb_hard_outputs > cap->num_buffers_hard_out) { + printf( + "Too many hard outputs defined: %u, max: %u\n", + nb_hard_outputs, + cap->num_buffers_hard_out); + return TEST_FAILED; + } + if (intr_enabled && !(cap->capability_flags & + RTE_BBDEV_TURBO_DEC_INTERRUPTS)) { + printf( + "Dequeue interrupts are not supported!\n"); + return TEST_FAILED; + } + + return TEST_SUCCESS; + } else if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) { + const struct rte_bbdev_op_cap_turbo_enc *cap = + &op_cap->cap.turbo_enc; + + if (!flags_match(test_vector.turbo_enc.op_flags, + cap->capability_flags)) + return TEST_FAILED; + if (nb_inputs > cap->num_buffers_src) { + printf("Too many inputs defined: %u, max: %u\n", + nb_inputs, cap->num_buffers_src); + return TEST_FAILED; + } + if (nb_hard_outputs > cap->num_buffers_dst) { + printf( + "Too many hard outputs defined: %u, max: %u\n", + nb_hard_outputs, cap->num_buffers_src); + return TEST_FAILED; + } + if (intr_enabled && !(cap->capability_flags & + RTE_BBDEV_TURBO_ENC_INTERRUPTS)) { + printf( + "Dequeue interrupts are not supported!\n"); + return TEST_FAILED; + } + + return TEST_SUCCESS; + } + } + + if ((i 
== 0) && (test_vector.op_type == RTE_BBDEV_OP_NONE)) + return TEST_SUCCESS; /* Special case for NULL device */ + + return TEST_FAILED; +} + +/* calculates optimal mempool size not smaller than the val */ +static unsigned int +optimal_mempool_size(unsigned int val) +{ + return rte_align32pow2(val + 1) - 1; +} + +/* allocates mbuf mempool for inputs and outputs */ +static struct rte_mempool * +create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id, + int socket_id, unsigned int mbuf_pool_size, + const char *op_type_str) +{ + unsigned int i; + uint32_t max_seg_sz = 0; + char pool_name[RTE_MEMPOOL_NAMESIZE]; + + /* find max input segment size */ + for (i = 0; i < entries->nb_segments; ++i) + if (entries->segments[i].length > max_seg_sz) + max_seg_sz = entries->segments[i].length; + + snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str, + dev_id); + return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0, + RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM, + (unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id); +} + +static int +create_mempools(struct active_device *ad, int socket_id, + enum rte_bbdev_op_type op_type, uint16_t num_ops) +{ + struct rte_mempool *mp; + unsigned int ops_pool_size, mbuf_pool_size = 0; + char pool_name[RTE_MEMPOOL_NAMESIZE]; + const char *op_type_str; + + struct op_data_entries *in = &test_vector.entries[DATA_INPUT]; + struct op_data_entries *hard_out = + &test_vector.entries[DATA_HARD_OUTPUT]; + struct op_data_entries *soft_out = + &test_vector.entries[DATA_SOFT_OUTPUT]; + + /* allocate ops mempool */ + ops_pool_size = optimal_mempool_size(RTE_MAX( + /* Ops used plus 1 reference op */ + RTE_MAX((unsigned int)(ad->nb_queues * num_ops + 1), + /* Minimal cache size plus 1 reference op */ + (unsigned int)(1.5 * rte_lcore_count() * + OPS_CACHE_SIZE + 1)), + OPS_POOL_SIZE_MIN)); + + op_type_str = rte_bbdev_op_type_str(op_type); + TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type); + + snprintf(pool_name, 
sizeof(pool_name), "%s_pool_%u", op_type_str, + ad->dev_id); + mp = rte_bbdev_op_pool_create(pool_name, op_type, + ops_pool_size, OPS_CACHE_SIZE, socket_id); + TEST_ASSERT_NOT_NULL(mp, + "ERROR Failed to create %u items ops pool for dev %u on socket %u.", + ops_pool_size, + ad->dev_id, + socket_id); + ad->ops_mempool = mp; + + /* Inputs */ + mbuf_pool_size = optimal_mempool_size(ops_pool_size * in->nb_segments); + mp = create_mbuf_pool(in, ad->dev_id, socket_id, mbuf_pool_size, "in"); + TEST_ASSERT_NOT_NULL(mp, + "ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.", + mbuf_pool_size, + ad->dev_id, + socket_id); + ad->in_mbuf_pool = mp; + + /* Hard outputs */ + mbuf_pool_size = optimal_mempool_size(ops_pool_size * + hard_out->nb_segments); + mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id, mbuf_pool_size, + "hard_out"); + TEST_ASSERT_NOT_NULL(mp, + "ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.", + mbuf_pool_size, + ad->dev_id, + socket_id); + ad->hard_out_mbuf_pool = mp; + + if (soft_out->nb_segments == 0) + return TEST_SUCCESS; + + /* Soft outputs */ + mbuf_pool_size = optimal_mempool_size(ops_pool_size * + soft_out->nb_segments); + mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id, mbuf_pool_size, + "soft_out"); + TEST_ASSERT_NOT_NULL(mp, + "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.", + mbuf_pool_size, + ad->dev_id, + socket_id); + ad->soft_out_mbuf_pool = mp; + + return 0; +} + +static int +add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info, + struct test_bbdev_vector *vector) +{ + int ret; + unsigned int queue_id; + struct rte_bbdev_queue_conf qconf; + struct active_device *ad = &active_devs[nb_active_devs]; + unsigned int nb_queues; + enum rte_bbdev_op_type op_type = vector->op_type; + + nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues); + /* setup device */ + ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id); + if (ret 
< 0) { + printf("rte_bbdev_setup_queues(%u, %u, %d) ret %i\n", + dev_id, nb_queues, info->socket_id, ret); + return TEST_FAILED; + } + + /* configure interrupts if needed */ + if (intr_enabled) { + ret = rte_bbdev_intr_enable(dev_id); + if (ret < 0) { + printf("rte_bbdev_intr_enable(%u) ret %i\n", dev_id, + ret); + return TEST_FAILED; + } + } + + /* setup device queues */ + qconf.socket = info->socket_id; + qconf.queue_size = info->drv.default_queue_conf.queue_size; + qconf.priority = 0; + qconf.deferred_start = 0; + qconf.op_type = op_type; + + for (queue_id = 0; queue_id < nb_queues; ++queue_id) { + ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf); + if (ret != 0) { + printf( + "Allocated all queues (id=%u) at prio%u on dev%u\n", + queue_id, qconf.priority, dev_id); + qconf.priority++; + ret = rte_bbdev_queue_configure(ad->dev_id, queue_id, + &qconf); + } + if (ret != 0) { + printf("All queues on dev %u allocated: %u\n", + dev_id, queue_id); + break; + } + ad->queue_ids[queue_id] = queue_id; + } + TEST_ASSERT(queue_id != 0, + "ERROR Failed to configure any queues on dev %u", + dev_id); + ad->nb_queues = queue_id; + + set_avail_op(ad, op_type); + + return TEST_SUCCESS; +} + +static int +add_active_device(uint8_t dev_id, struct rte_bbdev_info *info, + struct test_bbdev_vector *vector) +{ + int ret; + + active_devs[nb_active_devs].driver_name = info->drv.driver_name; + active_devs[nb_active_devs].dev_id = dev_id; + + ret = add_bbdev_dev(dev_id, info, vector); + if (ret == TEST_SUCCESS) + ++nb_active_devs; + return ret; +} + +static uint8_t +populate_active_devices(void) +{ + int ret; + uint8_t dev_id; + uint8_t nb_devs_added = 0; + struct rte_bbdev_info info; + + RTE_BBDEV_FOREACH(dev_id) { + rte_bbdev_info_get(dev_id, &info); + + if (check_dev_cap(&info)) { + printf( + "Device %d (%s) does not support specified capabilities\n", + dev_id, info.dev_name); + continue; + } + + ret = add_active_device(dev_id, &info, &test_vector); + if (ret != 0) { + 
printf("Adding active bbdev %s skipped\n",
					info.dev_name);
			continue;
		}
		nb_devs_added++;
	}

	return nb_devs_added;
}

/* Parse the test vector file named on the command line into test_vector. */
static int
read_test_vector(void)
{
	int ret;

	memset(&test_vector, 0, sizeof(test_vector));
	printf("Test vector file = %s\n", get_vector_filename());
	ret = test_bbdev_vector_read(get_vector_filename(), &test_vector);
	TEST_ASSERT_SUCCESS(ret, "Failed to parse file %s\n",
			get_vector_filename());

	return TEST_SUCCESS;
}

/* Suite setup: read the vector and register all capability-matching devices. */
static int
testsuite_setup(void)
{
	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

	if (populate_active_devices() == 0) {
		printf("No suitable devices found!\n");
		return TEST_SKIPPED;
	}

	return TEST_SUCCESS;
}

/* Suite setup variant that additionally requires dequeue-interrupt support. */
static int
interrupt_testsuite_setup(void)
{
	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");

	/* Enable interrupts */
	intr_enabled = true;

	/* Special case for NULL device (RTE_BBDEV_OP_NONE) */
	if (populate_active_devices() == 0 ||
			test_vector.op_type == RTE_BBDEV_OP_NONE) {
		intr_enabled = false;
		printf("No suitable devices found!\n");
		return TEST_SKIPPED;
	}

	return TEST_SUCCESS;
}

/* Suite teardown: close every device and forget all active-device state. */
static void
testsuite_teardown(void)
{
	uint8_t dev_id;

	/* Unconfigure devices */
	RTE_BBDEV_FOREACH(dev_id)
		rte_bbdev_close(dev_id);

	/* Clear active devices structs.
 */
	memset(active_devs, 0, sizeof(active_devs));
	nb_active_devs = 0;
}

/* Per-test setup: reset stats and start every active device. */
static int
ut_setup(void)
{
	uint8_t i, dev_id;

	for (i = 0; i < nb_active_devs; i++) {
		dev_id = active_devs[i].dev_id;
		/* reset bbdev stats */
		TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id),
				"Failed to reset stats of bbdev %u", dev_id);
		/* start the device */
		TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id),
				"Failed to start bbdev %u", dev_id);
	}

	return TEST_SUCCESS;
}

/* Per-test teardown: fetch stats and stop every active device. */
static void
ut_teardown(void)
{
	uint8_t i, dev_id;
	struct rte_bbdev_stats stats;

	for (i = 0; i < nb_active_devs; i++) {
		dev_id = active_devs[i].dev_id;
		/* read stats and print */
		rte_bbdev_stats_get(dev_id, &stats);
		/* Stop the device */
		rte_bbdev_stop(dev_id);
	}
}

/*
 * Fill n rte_bbdev_op_data descriptors from the reference entries.
 * DATA_INPUT entries get mbuf chains populated with the reference payload
 * (each segment's address is checked against the device min alignment);
 * output entries only get an empty head mbuf.
 */
static int
init_op_data_objs(struct rte_bbdev_op_data *bufs,
		struct op_data_entries *ref_entries,
		struct rte_mempool *mbuf_pool, const uint16_t n,
		enum op_data_type op_type, uint16_t min_alignment)
{
	int ret;
	unsigned int i, j;

	for (i = 0; i < n; ++i) {
		char *data;
		struct op_data_buf *seg = &ref_entries->segments[0];
		struct rte_mbuf *m_head = rte_pktmbuf_alloc(mbuf_pool);
		TEST_ASSERT_NOT_NULL(m_head,
				"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
				op_type, n * ref_entries->nb_segments,
				mbuf_pool->size);

		bufs[i].data = m_head;
		bufs[i].offset = 0;
		bufs[i].length = 0;

		if (op_type == DATA_INPUT) {
			data = rte_pktmbuf_append(m_head, seg->length);
			TEST_ASSERT_NOT_NULL(data,
					"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
					seg->length, op_type);

			TEST_ASSERT(data == RTE_PTR_ALIGN(data, min_alignment),
					"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
					data, min_alignment);
			rte_memcpy(data, seg->addr, seg->length);
			bufs[i].length += seg->length;


			/* Remaining reference segments become chained mbufs. */
			for (j = 1; j < ref_entries->nb_segments; ++j) {
				struct rte_mbuf *m_tail =
						rte_pktmbuf_alloc(mbuf_pool);
				TEST_ASSERT_NOT_NULL(m_tail,
						"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
						op_type,
						n * ref_entries->nb_segments,
						mbuf_pool->size);
				seg += 1;

				data = rte_pktmbuf_append(m_tail, seg->length);
				TEST_ASSERT_NOT_NULL(data,
						"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
						seg->length, op_type);

				TEST_ASSERT(data == RTE_PTR_ALIGN(data,
						min_alignment),
						"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
						data, min_alignment);
				rte_memcpy(data, seg->addr, seg->length);
				bufs[i].length += seg->length;

				ret = rte_pktmbuf_chain(m_head, m_tail);
				TEST_ASSERT_SUCCESS(ret,
						"Couldn't chain mbufs from %d data type mbuf pool",
						op_type);
			}
		}
	}

	return 0;
}

/*
 * Allocate len bytes of zeroed memory, preferring the given socket and
 * falling back to lower-numbered sockets on failure.
 */
static int
allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
		const int socket)
{
	int i;

	*buffers = rte_zmalloc_socket(NULL, len, 0, socket);
	if (*buffers == NULL) {
		printf("WARNING: Failed to allocate op_data on socket %d\n",
				socket);
		/* try to allocate memory on other detected sockets */
		for (i = 0; i < socket; i++) {
			*buffers = rte_zmalloc_socket(NULL, len, 0, i);
			if (*buffers != NULL)
				break;
		}
	}

	return (*buffers == NULL) ?
TEST_FAILED : TEST_SUCCESS;
}

/*
 * Allocate and fill the per-queue op_data arrays for every data type that
 * is present in the test vector.
 * NOTE(review): mbuf_pools[] and queue_ops[] are both ordered to match
 * enum op_data_type (input, soft output, hard output) - confirm against
 * test_bbdev_vector.h if that enum ever changes.
 */
static int
fill_queue_buffers(struct test_op_params *op_params,
		struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
		struct rte_mempool *soft_out_mp, uint16_t queue_id,
		uint16_t min_alignment, const int socket_id)
{
	int ret;
	enum op_data_type type;
	const uint16_t n = op_params->num_to_process;

	struct rte_mempool *mbuf_pools[DATA_NUM_TYPES] = {
		in_mp,
		soft_out_mp,
		hard_out_mp,
	};

	struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
		&op_params->q_bufs[socket_id][queue_id].inputs,
		&op_params->q_bufs[socket_id][queue_id].soft_outputs,
		&op_params->q_bufs[socket_id][queue_id].hard_outputs,
	};

	for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
		struct op_data_entries *ref_entries =
				&test_vector.entries[type];
		if (ref_entries->nb_segments == 0)
			continue;

		ret = allocate_buffers_on_socket(queue_ops[type],
				n * sizeof(struct rte_bbdev_op_data),
				socket_id);
		TEST_ASSERT_SUCCESS(ret,
				"Couldn't allocate memory for rte_bbdev_op_data structs");

		ret = init_op_data_objs(*queue_ops[type], ref_entries,
				mbuf_pools[type], n, type, min_alignment);
		TEST_ASSERT_SUCCESS(ret,
				"Couldn't init rte_bbdev_op_data structs");
	}

	return 0;
}

/* Free all mempools and per-queue op_data arrays of the active device. */
static void
free_buffers(struct active_device *ad, struct test_op_params *op_params)
{
	unsigned int i, j;

	rte_mempool_free(ad->ops_mempool);
	rte_mempool_free(ad->in_mbuf_pool);
	rte_mempool_free(ad->hard_out_mbuf_pool);
	rte_mempool_free(ad->soft_out_mbuf_pool);

	for (i = 0; i < rte_lcore_count(); ++i) {
		for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
			rte_free(op_params->q_bufs[j][i].inputs);
			rte_free(op_params->q_bufs[j][i].hard_outputs);
			rte_free(op_params->q_bufs[j][i].soft_outputs);
		}
	}
}

/* Clone the reference decode op into n ops, attaching per-op i/o buffers
 * starting at start_idx.
 */
static void
copy_reference_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
		unsigned int start_idx,
		struct rte_bbdev_op_data *inputs,
		struct rte_bbdev_op_data *hard_outputs,
		struct rte_bbdev_op_data *soft_outputs,
		struct rte_bbdev_dec_op *ref_op)
{
	unsigned int i;
	struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;

	for (i = 0; i < n; ++i) {
		/* code_block_mode == 0 means transport-block parameters. */
		if (turbo_dec->code_block_mode == 0) {
			ops[i]->turbo_dec.tb_params.ea =
					turbo_dec->tb_params.ea;
			ops[i]->turbo_dec.tb_params.eb =
					turbo_dec->tb_params.eb;
			ops[i]->turbo_dec.tb_params.k_pos =
					turbo_dec->tb_params.k_pos;
			ops[i]->turbo_dec.tb_params.k_neg =
					turbo_dec->tb_params.k_neg;
			ops[i]->turbo_dec.tb_params.c =
					turbo_dec->tb_params.c;
			ops[i]->turbo_dec.tb_params.c_neg =
					turbo_dec->tb_params.c_neg;
			ops[i]->turbo_dec.tb_params.cab =
					turbo_dec->tb_params.cab;
		} else {
			ops[i]->turbo_dec.cb_params.e = turbo_dec->cb_params.e;
			ops[i]->turbo_dec.cb_params.k = turbo_dec->cb_params.k;
		}

		ops[i]->turbo_dec.ext_scale = turbo_dec->ext_scale;
		ops[i]->turbo_dec.iter_max = turbo_dec->iter_max;
		ops[i]->turbo_dec.iter_min = turbo_dec->iter_min;
		ops[i]->turbo_dec.op_flags = turbo_dec->op_flags;
		ops[i]->turbo_dec.rv_index = turbo_dec->rv_index;
		ops[i]->turbo_dec.num_maps = turbo_dec->num_maps;
		ops[i]->turbo_dec.code_block_mode = turbo_dec->code_block_mode;

		ops[i]->turbo_dec.hard_output = hard_outputs[start_idx + i];
		ops[i]->turbo_dec.input = inputs[start_idx + i];
		if (soft_outputs != NULL)
			ops[i]->turbo_dec.soft_output =
					soft_outputs[start_idx + i];
	}
}

/* Clone the reference encode op into n ops, attaching per-op i/o buffers
 * starting at start_idx.
 */
static void
copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
		unsigned int start_idx,
		struct rte_bbdev_op_data *inputs,
		struct rte_bbdev_op_data *outputs,
		struct rte_bbdev_enc_op *ref_op)
{
	unsigned int i;
	struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;
	for (i = 0; i < n; ++i) {
		/* code_block_mode == 0 means transport-block parameters. */
		if (turbo_enc->code_block_mode == 0) {
			ops[i]->turbo_enc.tb_params.ea =
					turbo_enc->tb_params.ea;
			ops[i]->turbo_enc.tb_params.eb =
					turbo_enc->tb_params.eb;
			ops[i]->turbo_enc.tb_params.k_pos =
					turbo_enc->tb_params.k_pos;
			ops[i]->turbo_enc.tb_params.k_neg =
turbo_enc->tb_params.k_neg;
			ops[i]->turbo_enc.tb_params.c =
					turbo_enc->tb_params.c;
			ops[i]->turbo_enc.tb_params.c_neg =
					turbo_enc->tb_params.c_neg;
			ops[i]->turbo_enc.tb_params.cab =
					turbo_enc->tb_params.cab;
			ops[i]->turbo_enc.tb_params.ncb_pos =
					turbo_enc->tb_params.ncb_pos;
			ops[i]->turbo_enc.tb_params.ncb_neg =
					turbo_enc->tb_params.ncb_neg;
			ops[i]->turbo_enc.tb_params.r = turbo_enc->tb_params.r;
		} else {
			ops[i]->turbo_enc.cb_params.e = turbo_enc->cb_params.e;
			ops[i]->turbo_enc.cb_params.k = turbo_enc->cb_params.k;
			ops[i]->turbo_enc.cb_params.ncb =
					turbo_enc->cb_params.ncb;
		}
		ops[i]->turbo_enc.rv_index = turbo_enc->rv_index;
		ops[i]->turbo_enc.op_flags = turbo_enc->op_flags;
		ops[i]->turbo_enc.code_block_mode = turbo_enc->code_block_mode;

		ops[i]->turbo_enc.output = outputs[start_idx + i];
		ops[i]->turbo_enc.input = inputs[start_idx + i];
	}
}

/* Verify a decode op's status and that opaque_data preserved its order. */
static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
		unsigned int order_idx, const int expected_status)
{
	TEST_ASSERT(op->status == expected_status,
			"op_status (%d) != expected_status (%d)",
			op->status, expected_status);

	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
			"Ordering error, expected %p, got %p",
			(void *)(uintptr_t)order_idx, op->opaque_data);

	return TEST_SUCCESS;
}

/* Verify an encode op's status and that opaque_data preserved its order. */
static int
check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
		unsigned int order_idx, const int expected_status)
{
	TEST_ASSERT(op->status == expected_status,
			"op_status (%d) != expected_status (%d)",
			op->status, expected_status);

	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
			"Ordering error, expected %p, got %p",
			(void *)(uintptr_t)order_idx, op->opaque_data);

	return TEST_SUCCESS;
}

/* Compare an op's mbuf chain, segment by segment, against the reference
 * segment list (op->offset applies to the first segment only).
 */
static inline int
validate_op_chain(struct rte_bbdev_op_data *op,
		struct op_data_entries *orig_op)
{
	uint8_t i;
	struct rte_mbuf *m = op->data;
	uint8_t nb_dst_segments = orig_op->nb_segments;

	TEST_ASSERT(nb_dst_segments == m->nb_segs,
			"Number of segments differ in original (%u) and filled (%u) op",
			nb_dst_segments, m->nb_segs);

	for (i = 0; i < nb_dst_segments; ++i) {
		/* Apply offset to the first mbuf segment */
		uint16_t offset = (i == 0) ? op->offset : 0;
		uint16_t data_len = m->data_len - offset;

		TEST_ASSERT(orig_op->segments[i].length == data_len,
				"Length of segment differ in original (%u) and filled (%u) op",
				orig_op->segments[i].length, data_len);
		TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
				rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
				data_len,
				"Output buffers (CB=%u) are not equal", i);
		m = m->next;
	}

	return TEST_SUCCESS;
}

/* Validate hard (and, when enabled, soft) output buffers of decode runs. */
static int
validate_dec_buffers(struct rte_bbdev_dec_op *ref_op, struct test_buffers *bufs,
		const uint16_t num_to_process)
{
	int i;

	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];
	struct op_data_entries *soft_data_orig =
			&test_vector.entries[DATA_SOFT_OUTPUT];

	for (i = 0; i < num_to_process; i++) {
		TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
				hard_data_orig),
				"Hard output buffers are not equal");
		if (ref_op->turbo_dec.op_flags &
				RTE_BBDEV_TURBO_SOFT_OUTPUT)
			TEST_ASSERT_SUCCESS(validate_op_chain(
					&bufs->soft_outputs[i],
					soft_data_orig),
					"Soft output buffers are not equal");
	}

	return TEST_SUCCESS;
}

/* Validate hard output buffers of encode runs. */
static int
validate_enc_buffers(struct test_buffers *bufs, const uint16_t num_to_process)
{
	int i;

	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];

	for (i = 0; i < num_to_process; i++)
		TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
				hard_data_orig), "");

	return TEST_SUCCESS;
}

/* Validate status, ordering, iteration count (when masked in) and output
 * buffers of n dequeued decode ops against the reference op.
 */
static int
validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
		struct rte_bbdev_dec_op *ref_op, const int vector_mask)
{
	unsigned int i;
	int ret;
	struct op_data_entries *hard_data_orig =
&test_vector.entries[DATA_HARD_OUTPUT];
	struct op_data_entries *soft_data_orig =
			&test_vector.entries[DATA_SOFT_OUTPUT];
	struct rte_bbdev_op_turbo_dec *ops_td;
	struct rte_bbdev_op_data *hard_output;
	struct rte_bbdev_op_data *soft_output;
	struct rte_bbdev_op_turbo_dec *ref_td = &ref_op->turbo_dec;

	for (i = 0; i < n; ++i) {
		ops_td = &ops[i]->turbo_dec;
		hard_output = &ops_td->hard_output;
		soft_output = &ops_td->soft_output;

		if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
			TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
					"Returned iter_count (%d) > expected iter_count (%d)",
					ops_td->iter_count, ref_td->iter_count);
		ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
		TEST_ASSERT_SUCCESS(ret,
				"Checking status and ordering for decoder failed");

		TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
				hard_data_orig),
				"Hard output buffers (CB=%u) are not equal",
				i);

		if (ref_op->turbo_dec.op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)
			TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
					soft_data_orig),
					"Soft output buffers (CB=%u) are not equal",
					i);
	}

	return TEST_SUCCESS;
}

/* Validate status, ordering and output buffers of n dequeued encode ops. */
static int
validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
		struct rte_bbdev_enc_op *ref_op)
{
	unsigned int i;
	int ret;
	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];

	for (i = 0; i < n; ++i) {
		ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
		TEST_ASSERT_SUCCESS(ret,
				"Checking status and ordering for encoder failed");
		TEST_ASSERT_SUCCESS(validate_op_chain(
				&ops[i]->turbo_enc.output,
				hard_data_orig),
				"Output buffers (CB=%u) are not equal",
				i);
	}

	return TEST_SUCCESS;
}

/* Build the reference decode op from the test vector; its input length is
 * the sum of all reference input segment lengths.
 */
static void
create_reference_dec_op(struct rte_bbdev_dec_op *op)
{
	unsigned int i;
	struct op_data_entries *entry;

	op->turbo_dec = test_vector.turbo_dec;
	entry = &test_vector.entries[DATA_INPUT];
	for (i = 0; i < entry->nb_segments; ++i)
		op->turbo_dec.input.length +=
				entry->segments[i].length;
}

/* Build the reference encode op from the test vector; its input length is
 * the sum of all reference input segment lengths.
 */
static void
create_reference_enc_op(struct rte_bbdev_enc_op *op)
{
	unsigned int i;
	struct op_data_entries *entry;

	op->turbo_enc = test_vector.turbo_enc;
	entry = &test_vector.entries[DATA_INPUT];
	for (i = 0; i < entry->nb_segments; ++i)
		op->turbo_enc.input.length +=
				entry->segments[i].length;
}

/* Allocate the reference op from ops_mp and record the per-run parameters
 * in op_params.
 */
static int
init_test_op_params(struct test_op_params *op_params,
		enum rte_bbdev_op_type op_type, const int expected_status,
		const int vector_mask, struct rte_mempool *ops_mp,
		uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
{
	int ret = 0;
	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
		ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
				&op_params->ref_dec_op, 1);
	else
		ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
				&op_params->ref_enc_op, 1);

	TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");

	op_params->mp = ops_mp;
	op_params->burst_sz = burst_sz;
	op_params->num_to_process = num_to_process;
	op_params->num_lcores = num_lcores;
	op_params->vector_mask = vector_mask;
	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
		op_params->ref_dec_op->status = expected_status;
	else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
		op_params->ref_enc_op->status = expected_status;

	return 0;
}

/* Prepare mempools, reference op and queue buffers on one device, then run
 * the given test case function and free everything afterwards.
 */
static int
run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
		struct test_op_params *op_params)
{
	int t_ret, f_ret, socket_id = SOCKET_ID_ANY;
	unsigned int i;
	struct active_device *ad;
	unsigned int burst_sz = get_burst_sz();
	enum rte_bbdev_op_type op_type = test_vector.op_type;

	ad = &active_devs[dev_id];

	/* Check if device supports op_type */
	if (!is_avail_op(ad, test_vector.op_type))
		return TEST_SUCCESS;

	struct rte_bbdev_info info;
	rte_bbdev_info_get(ad->dev_id, &info);
	socket_id = GET_SOCKET(info.socket_id);

	/* NULL-device runs still need pools; treat them as encode ops. */
	if (op_type == RTE_BBDEV_OP_NONE)
		op_type = RTE_BBDEV_OP_TURBO_ENC;
	f_ret = create_mempools(ad, socket_id,
op_type,
			get_num_ops());
	if (f_ret != TEST_SUCCESS) {
		printf("Couldn't create mempools");
		goto fail;
	}

	f_ret = init_test_op_params(op_params, test_vector.op_type,
			test_vector.expected_status,
			test_vector.mask,
			ad->ops_mempool,
			burst_sz,
			get_num_ops(),
			get_num_lcores());
	if (f_ret != TEST_SUCCESS) {
		printf("Couldn't init test op params");
		goto fail;
	}

	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
		create_reference_dec_op(op_params->ref_dec_op);
	else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
		create_reference_enc_op(op_params->ref_enc_op);

	for (i = 0; i < ad->nb_queues; ++i) {
		f_ret = fill_queue_buffers(op_params,
				ad->in_mbuf_pool,
				ad->hard_out_mbuf_pool,
				ad->soft_out_mbuf_pool,
				ad->queue_ids[i],
				info.drv.min_alignment,
				socket_id);
		if (f_ret != TEST_SUCCESS) {
			printf("Couldn't init queue buffers");
			goto fail;
		}
	}

	/* Run test case function */
	t_ret = test_case_func(ad, op_params);

	/* Free active device resources and return */
	free_buffers(ad, op_params);
	return t_ret;

fail:
	free_buffers(ad, op_params);
	return TEST_FAILED;
}

/* Run given test function per active device per supported op type
 * per burst size.
 */
static int
run_test_case(test_case_function *test_case_func)
{
	int ret = 0;
	uint8_t dev;

	/* Alloc op_params */
	struct test_op_params *op_params = rte_zmalloc(NULL,
			sizeof(struct test_op_params), RTE_CACHE_LINE_SIZE);
	TEST_ASSERT_NOT_NULL(op_params, "Failed to alloc %zuB for op_params",
			RTE_ALIGN(sizeof(struct test_op_params),
				RTE_CACHE_LINE_SIZE));

	/* For each device run test case function */
	for (dev = 0; dev < nb_active_devs; ++dev)
		ret |= run_test_case_on_device(test_case_func, dev, op_params);

	rte_free(op_params);

	return ret;
}

/* Interrupt-mode dequeue handler: drains the queue, validates the buffers
 * once all ops are in and records throughput in the thread params.
 */
static void
dequeue_event_callback(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param)
{
	int ret;
	uint16_t i;
	uint64_t total_time;
	uint16_t deq, burst_sz, num_to_process;
	uint16_t queue_id = INVALID_QUEUE_ID;
	struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
	struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
	struct test_buffers *bufs;
	struct rte_bbdev_info info;

	/* Input length in bytes, million operations per second,
	 * million bits per second.
+ */ + double in_len; + + struct thread_params *tp = cb_arg; + + RTE_SET_USED(ret_param); + queue_id = tp->queue_id; + + /* Find matching thread params using queue_id */ + for (i = 0; i < MAX_QUEUES; ++i, ++tp) + if (tp->queue_id == queue_id) + break; + + if (i == MAX_QUEUES) { + printf("%s: Queue_id from interrupt details was not found!\n", + __func__); + return; + } + + if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) { + rte_atomic16_set(&tp->processing_status, TEST_FAILED); + printf( + "Dequeue interrupt handler called for incorrect event!\n"); + return; + } + + burst_sz = tp->op_params->burst_sz; + num_to_process = tp->op_params->num_to_process; + + if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) + deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id, dec_ops, + burst_sz); + else + deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, enc_ops, + burst_sz); + + if (deq < burst_sz) { + printf( + "After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n", + burst_sz, deq); + rte_atomic16_set(&tp->processing_status, TEST_FAILED); + return; + } + + if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_to_process) { + rte_atomic16_add(&tp->nb_dequeued, deq); + return; + } + + total_time = rte_rdtsc_precise() - tp->start_time; + + rte_bbdev_info_get(dev_id, &info); + + bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id]; + + ret = TEST_SUCCESS; + if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) + ret = validate_dec_buffers(tp->op_params->ref_dec_op, bufs, + num_to_process); + else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC) + ret = validate_enc_buffers(bufs, num_to_process); + + if (ret) { + printf("Buffers validation failed\n"); + rte_atomic16_set(&tp->processing_status, TEST_FAILED); + } + + switch (test_vector.op_type) { + case RTE_BBDEV_OP_TURBO_DEC: + in_len = tp->op_params->ref_dec_op->turbo_dec.input.length; + break; + case RTE_BBDEV_OP_TURBO_ENC: + in_len = tp->op_params->ref_enc_op->turbo_enc.input.length; + 
break; + case RTE_BBDEV_OP_NONE: + in_len = 0.0; + break; + default: + printf("Unknown op type: %d\n", test_vector.op_type); + rte_atomic16_set(&tp->processing_status, TEST_FAILED); + return; + } + + tp->mops = ((double)num_to_process / 1000000.0) / + ((double)total_time / (double)rte_get_tsc_hz()); + tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) / + ((double)total_time / (double)rte_get_tsc_hz()); + + rte_atomic16_add(&tp->nb_dequeued, deq); +} + +static int +throughput_intr_lcore_dec(void *arg) +{ + struct thread_params *tp = arg; + unsigned int enqueued; + struct rte_bbdev_dec_op *ops[MAX_BURST]; + const uint16_t queue_id = tp->queue_id; + const uint16_t burst_sz = tp->op_params->burst_sz; + const uint16_t num_to_process = tp->op_params->num_to_process; + struct test_buffers *bufs = NULL; + unsigned int allocs_failed = 0; + struct rte_bbdev_info info; + int ret; + + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), + "BURST_SIZE should be <= %u", MAX_BURST); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id), + "Failed to enable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + + rte_bbdev_info_get(tp->dev_id, &info); + bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id]; + + rte_atomic16_clear(&tp->processing_status); + rte_atomic16_clear(&tp->nb_dequeued); + + while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT) + rte_pause(); + + tp->start_time = rte_rdtsc_precise(); + for (enqueued = 0; enqueued < num_to_process;) { + + uint16_t num_to_enq = burst_sz; + + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + + ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops, + num_to_enq); + if (ret != 0) { + allocs_failed++; + continue; + } + + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_dec_op(ops, num_to_enq, enqueued, + bufs->inputs, + bufs->hard_outputs, + bufs->soft_outputs, + tp->op_params->ref_dec_op); + + enqueued += 
rte_bbdev_enqueue_dec_ops(tp->dev_id, queue_id, ops, + num_to_enq); + + rte_bbdev_dec_op_free_bulk(ops, num_to_enq); + } + + if (allocs_failed > 0) + printf("WARNING: op allocations failed: %u times\n", + allocs_failed); + + return TEST_SUCCESS; +} + +static int +throughput_intr_lcore_enc(void *arg) +{ + struct thread_params *tp = arg; + unsigned int enqueued; + struct rte_bbdev_enc_op *ops[MAX_BURST]; + const uint16_t queue_id = tp->queue_id; + const uint16_t burst_sz = tp->op_params->burst_sz; + const uint16_t num_to_process = tp->op_params->num_to_process; + struct test_buffers *bufs = NULL; + unsigned int allocs_failed = 0; + struct rte_bbdev_info info; + int ret; + + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), + "BURST_SIZE should be <= %u", MAX_BURST); + + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id), + "Failed to enable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + + rte_bbdev_info_get(tp->dev_id, &info); + bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id]; + + rte_atomic16_clear(&tp->processing_status); + rte_atomic16_clear(&tp->nb_dequeued); + + while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT) + rte_pause(); + + tp->start_time = rte_rdtsc_precise(); + for (enqueued = 0; enqueued < num_to_process;) { + + uint16_t num_to_enq = burst_sz; + + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + + ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops, + num_to_enq); + if (ret != 0) { + allocs_failed++; + continue; + } + + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_enc_op(ops, num_to_enq, enqueued, + bufs->inputs, + bufs->hard_outputs, + tp->op_params->ref_enc_op); + + enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id, queue_id, ops, + num_to_enq); + + rte_bbdev_enc_op_free_bulk(ops, num_to_enq); + } + + if (allocs_failed > 0) + printf("WARNING: op allocations failed: %u times\n", + allocs_failed); + + return TEST_SUCCESS; 
+} + +static int +throughput_pmd_lcore_dec(void *arg) +{ + struct thread_params *tp = arg; + unsigned int enqueued, dequeued; + struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; + uint64_t total_time, start_time; + const uint16_t queue_id = tp->queue_id; + const uint16_t burst_sz = tp->op_params->burst_sz; + const uint16_t num_to_process = tp->op_params->num_to_process; + struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op; + struct test_buffers *bufs = NULL; + unsigned int allocs_failed = 0; + int ret; + struct rte_bbdev_info info; + + /* Input length in bytes, million operations per second, million bits + * per second. + */ + double in_len; + + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), + "BURST_SIZE should be <= %u", MAX_BURST); + + rte_bbdev_info_get(tp->dev_id, &info); + bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id]; + + while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT) + rte_pause(); + + start_time = rte_rdtsc_precise(); + for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) { + uint16_t deq; + + if (likely(enqueued < num_to_process)) { + + uint16_t num_to_enq = burst_sz; + + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + + ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, + ops_enq, num_to_enq); + if (ret != 0) { + allocs_failed++; + goto do_dequeue; + } + + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_dec_op(ops_enq, num_to_enq, + enqueued, + bufs->inputs, + bufs->hard_outputs, + bufs->soft_outputs, + ref_op); + + enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id, + queue_id, ops_enq, num_to_enq); + } +do_dequeue: + deq = rte_bbdev_dequeue_dec_ops(tp->dev_id, queue_id, ops_deq, + burst_sz); + dequeued += deq; + rte_bbdev_dec_op_free_bulk(ops_enq, deq); + } + total_time = rte_rdtsc_precise() - start_time; + + if (allocs_failed > 0) + printf("WARNING: op allocations failed: %u times\n", + allocs_failed); + + 
TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)", + enqueued, dequeued); + + if (test_vector.op_type != RTE_BBDEV_OP_NONE) { + ret = validate_dec_buffers(ref_op, bufs, num_to_process); + TEST_ASSERT_SUCCESS(ret, "Buffers validation failed"); + } + + in_len = ref_op->turbo_dec.input.length; + tp->mops = ((double)num_to_process / 1000000.0) / + ((double)total_time / (double)rte_get_tsc_hz()); + tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) / + ((double)total_time / (double)rte_get_tsc_hz()); + + return TEST_SUCCESS; +} + +static int +throughput_pmd_lcore_enc(void *arg) +{ + struct thread_params *tp = arg; + unsigned int enqueued, dequeued; + struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; + uint64_t total_time, start_time; + const uint16_t queue_id = tp->queue_id; + const uint16_t burst_sz = tp->op_params->burst_sz; + const uint16_t num_to_process = tp->op_params->num_to_process; + struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op; + struct test_buffers *bufs = NULL; + unsigned int allocs_failed = 0; + int ret; + struct rte_bbdev_info info; + + /* Input length in bytes, million operations per second, million bits + * per second. 
+ */ + double in_len; + + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), + "BURST_SIZE should be <= %u", MAX_BURST); + + rte_bbdev_info_get(tp->dev_id, &info); + bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id]; + + while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT) + rte_pause(); + + start_time = rte_rdtsc_precise(); + for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) { + uint16_t deq; + + if (likely(enqueued < num_to_process)) { + + uint16_t num_to_enq = burst_sz; + + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + + ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, + ops_enq, num_to_enq); + if (ret != 0) { + allocs_failed++; + goto do_dequeue; + } + + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_enc_op(ops_enq, num_to_enq, + enqueued, + bufs->inputs, + bufs->hard_outputs, + ref_op); + + enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id, + queue_id, ops_enq, num_to_enq); + } +do_dequeue: + deq = rte_bbdev_dequeue_enc_ops(tp->dev_id, queue_id, ops_deq, + burst_sz); + dequeued += deq; + rte_bbdev_enc_op_free_bulk(ops_enq, deq); + } + total_time = rte_rdtsc_precise() - start_time; + + if (allocs_failed > 0) + printf("WARNING: op allocations failed: %u times\n", + allocs_failed); + + TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)", + enqueued, dequeued); + + if (test_vector.op_type != RTE_BBDEV_OP_NONE) { + ret = validate_enc_buffers(bufs, num_to_process); + TEST_ASSERT_SUCCESS(ret, "Buffers validation failed"); + } + + in_len = ref_op->turbo_enc.input.length; + + tp->mops = ((double)num_to_process / 1000000.0) / + ((double)total_time / (double)rte_get_tsc_hz()); + tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) / + ((double)total_time / (double)rte_get_tsc_hz()); + + return TEST_SUCCESS; +} +static void +print_throughput(struct thread_params *t_params, unsigned int used_cores) +{ + unsigned int lcore_id, iter = 0; + double 
total_mops = 0, total_mbps = 0; + + RTE_LCORE_FOREACH(lcore_id) { + if (iter++ >= used_cores) + break; + printf("\tlcore_id: %u, throughput: %.8lg MOPS, %.8lg Mbps\n", + lcore_id, t_params[lcore_id].mops, t_params[lcore_id].mbps); + total_mops += t_params[lcore_id].mops; + total_mbps += t_params[lcore_id].mbps; + } + printf( + "\n\tTotal stats for %u cores: throughput: %.8lg MOPS, %.8lg Mbps\n", + used_cores, total_mops, total_mbps); +} + +/* + * Test function that determines how long an enqueue + dequeue of a burst + * takes on available lcores. + */ +static int +throughput_test(struct active_device *ad, + struct test_op_params *op_params) +{ + int ret; + unsigned int lcore_id, used_cores = 0; + struct thread_params t_params[MAX_QUEUES]; + struct rte_bbdev_info info; + lcore_function_t *throughput_function; + struct thread_params *tp; + uint16_t num_lcores; + const char *op_type_str; + + rte_bbdev_info_get(ad->dev_id, &info); + + op_type_str = rte_bbdev_op_type_str(test_vector.op_type); + TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", + test_vector.op_type); + + printf( + "Throughput test: dev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, int mode: %s, GHz: %lg\n", + info.dev_name, ad->nb_queues, op_params->burst_sz, + op_params->num_to_process, op_params->num_lcores, + op_type_str, + intr_enabled ? "Interrupt mode" : "PMD mode", + (double)rte_get_tsc_hz() / 1000000000.0); + + /* Set number of lcores */ + num_lcores = (ad->nb_queues < (op_params->num_lcores)) + ? 
ad->nb_queues + : op_params->num_lcores; + + if (intr_enabled) { + if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) + throughput_function = throughput_intr_lcore_dec; + else + throughput_function = throughput_intr_lcore_enc; + + /* Dequeue interrupt callback registration */ + ret = rte_bbdev_callback_register(ad->dev_id, + RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback, + &t_params); + if (ret < 0) + return ret; + } else { + if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) + throughput_function = throughput_pmd_lcore_dec; + else + throughput_function = throughput_pmd_lcore_enc; + } + + rte_atomic16_set(&op_params->sync, SYNC_WAIT); + + t_params[rte_lcore_id()].dev_id = ad->dev_id; + t_params[rte_lcore_id()].op_params = op_params; + t_params[rte_lcore_id()].queue_id = + ad->queue_ids[used_cores++]; + + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (used_cores >= num_lcores) + break; + + t_params[lcore_id].dev_id = ad->dev_id; + t_params[lcore_id].op_params = op_params; + t_params[lcore_id].queue_id = ad->queue_ids[used_cores++]; + + rte_eal_remote_launch(throughput_function, &t_params[lcore_id], + lcore_id); + } + + rte_atomic16_set(&op_params->sync, SYNC_START); + ret = throughput_function(&t_params[rte_lcore_id()]); + + /* Master core is always used */ + used_cores = 1; + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (used_cores++ >= num_lcores) + break; + + ret |= rte_eal_wait_lcore(lcore_id); + } + + /* Return if test failed */ + if (ret) + return ret; + + /* Print throughput if interrupts are disabled and test passed */ + if (!intr_enabled) { + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + print_throughput(t_params, num_lcores); + return ret; + } + + /* In interrupt TC we need to wait for the interrupt callback to deqeue + * all pending operations. Skip waiting for queues which reported an + * error using processing_status variable. + * Wait for master lcore operations. 
+ */ + tp = &t_params[rte_lcore_id()]; + while ((rte_atomic16_read(&tp->nb_dequeued) < + op_params->num_to_process) && + (rte_atomic16_read(&tp->processing_status) != + TEST_FAILED)) + rte_pause(); + + ret |= rte_atomic16_read(&tp->processing_status); + + /* Wait for slave lcores operations */ + used_cores = 1; + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + tp = &t_params[lcore_id]; + if (used_cores++ >= num_lcores) + break; + + while ((rte_atomic16_read(&tp->nb_dequeued) < + op_params->num_to_process) && + (rte_atomic16_read(&tp->processing_status) != + TEST_FAILED)) + rte_pause(); + + ret |= rte_atomic16_read(&tp->processing_status); + } + + /* Print throughput if test passed */ + if (!ret && test_vector.op_type != RTE_BBDEV_OP_NONE) + print_throughput(t_params, num_lcores); + + return ret; +} + +static int +operation_latency_test_dec(struct rte_mempool *mempool, + struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op, + int vector_mask, uint16_t dev_id, uint16_t queue_id, + const uint16_t num_to_process, uint16_t burst_sz, + uint64_t *total_time) +{ + int ret = TEST_SUCCESS; + uint16_t i, j, dequeued; + struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; + uint64_t start_time = 0; + + for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { + uint16_t enq = 0, deq = 0; + bool first_time = true; + + if (unlikely(num_to_process - dequeued < burst_sz)) + burst_sz = num_to_process - dequeued; + + rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_dec_op(ops_enq, burst_sz, dequeued, + bufs->inputs, + bufs->hard_outputs, + bufs->soft_outputs, + ref_op); + + /* Set counter to validate the ordering */ + for (j = 0; j < burst_sz; ++j) + ops_enq[j]->opaque_data = (void *)(uintptr_t)j; + + start_time = rte_rdtsc_precise(); + + enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq], + burst_sz); + TEST_ASSERT(enq == burst_sz, + "Error enqueueing burst, expected %u, got %u", + 
burst_sz, enq); + + /* Dequeue */ + do { + deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id, + &ops_deq[deq], burst_sz - deq); + if (likely(first_time && (deq > 0))) { + *total_time += rte_rdtsc_precise() - start_time; + first_time = false; + } + } while (unlikely(burst_sz != deq)); + + if (test_vector.op_type != RTE_BBDEV_OP_NONE) { + ret = validate_dec_op(ops_deq, burst_sz, ref_op, + vector_mask); + TEST_ASSERT_SUCCESS(ret, "Validation failed!"); + } + + rte_bbdev_dec_op_free_bulk(ops_enq, deq); + dequeued += deq; + } + + return i; +} + +static int +operation_latency_test_enc(struct rte_mempool *mempool, + struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op, + uint16_t dev_id, uint16_t queue_id, + const uint16_t num_to_process, uint16_t burst_sz, + uint64_t *total_time) +{ + int ret = TEST_SUCCESS; + uint16_t i, j, dequeued; + struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; + uint64_t start_time = 0; + + for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { + uint16_t enq = 0, deq = 0; + bool first_time = true; + + if (unlikely(num_to_process - dequeued < burst_sz)) + burst_sz = num_to_process - dequeued; + + rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz); + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_enc_op(ops_enq, burst_sz, dequeued, + bufs->inputs, + bufs->hard_outputs, + ref_op); + + /* Set counter to validate the ordering */ + for (j = 0; j < burst_sz; ++j) + ops_enq[j]->opaque_data = (void *)(uintptr_t)j; + + start_time = rte_rdtsc_precise(); + + enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq], + burst_sz); + TEST_ASSERT(enq == burst_sz, + "Error enqueueing burst, expected %u, got %u", + burst_sz, enq); + + /* Dequeue */ + do { + deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id, + &ops_deq[deq], burst_sz - deq); + if (likely(first_time && (deq > 0))) { + *total_time += rte_rdtsc_precise() - start_time; + first_time = false; + } + } while (unlikely(burst_sz != deq)); + + if 
(test_vector.op_type != RTE_BBDEV_OP_NONE) { + ret = validate_enc_op(ops_deq, burst_sz, ref_op); + TEST_ASSERT_SUCCESS(ret, "Validation failed!"); + } + + rte_bbdev_enc_op_free_bulk(ops_enq, deq); + dequeued += deq; + } + + return i; +} + +static int +operation_latency_test(struct active_device *ad, + struct test_op_params *op_params) +{ + int iter; + uint16_t burst_sz = op_params->burst_sz; + const uint16_t num_to_process = op_params->num_to_process; + const enum rte_bbdev_op_type op_type = test_vector.op_type; + const uint16_t queue_id = ad->queue_ids[0]; + struct test_buffers *bufs = NULL; + struct rte_bbdev_info info; + uint64_t total_time = 0; + const char *op_type_str; + + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), + "BURST_SIZE should be <= %u", MAX_BURST); + + rte_bbdev_info_get(ad->dev_id, &info); + bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id]; + + op_type_str = rte_bbdev_op_type_str(op_type); + TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type); + + printf( + "Validation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n", + info.dev_name, burst_sz, num_to_process, op_type_str); + + if (op_type == RTE_BBDEV_OP_TURBO_DEC) + iter = operation_latency_test_dec(op_params->mp, bufs, + op_params->ref_dec_op, op_params->vector_mask, + ad->dev_id, queue_id, num_to_process, + burst_sz, &total_time); + else + iter = operation_latency_test_enc(op_params->mp, bufs, + op_params->ref_enc_op, ad->dev_id, queue_id, + num_to_process, burst_sz, &total_time); + + if (iter <= 0) + return TEST_FAILED; + + printf("\toperation avg. 
latency: %lg cycles, %lg us\n", + (double)total_time / (double)iter, + (double)(total_time * 1000000) / (double)iter / + (double)rte_get_tsc_hz()); + + return TEST_SUCCESS; +} + +static int +offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs, + struct rte_bbdev_dec_op *ref_op, uint16_t dev_id, + uint16_t queue_id, const uint16_t num_to_process, + uint16_t burst_sz, uint64_t *enq_total_time, + uint64_t *deq_total_time) +{ + int i, dequeued; + struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; + uint64_t enq_start_time, deq_start_time; + + for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { + uint16_t enq = 0, deq = 0; + + if (unlikely(num_to_process - dequeued < burst_sz)) + burst_sz = num_to_process - dequeued; + + rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz); + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_dec_op(ops_enq, burst_sz, dequeued, + bufs->inputs, + bufs->hard_outputs, + bufs->soft_outputs, + ref_op); + + /* Start time measurment for enqueue function offload latency */ + enq_start_time = rte_rdtsc(); + do { + enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id, + &ops_enq[enq], burst_sz - enq); + } while (unlikely(burst_sz != enq)); + *enq_total_time += rte_rdtsc() - enq_start_time; + + /* ensure enqueue has been completed */ + rte_delay_ms(10); + + /* Start time measurment for dequeue function offload latency */ + deq_start_time = rte_rdtsc(); + do { + deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id, + &ops_deq[deq], burst_sz - deq); + } while (unlikely(burst_sz != deq)); + *deq_total_time += rte_rdtsc() - deq_start_time; + + rte_bbdev_dec_op_free_bulk(ops_enq, deq); + dequeued += deq; + } + + return i; +} + +static int +offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs, + struct rte_bbdev_enc_op *ref_op, uint16_t dev_id, + uint16_t queue_id, const uint16_t num_to_process, + uint16_t burst_sz, uint64_t *enq_total_time, + uint64_t 
*deq_total_time) +{ + int i, dequeued; + struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST]; + uint64_t enq_start_time, deq_start_time; + + for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) { + uint16_t enq = 0, deq = 0; + + if (unlikely(num_to_process - dequeued < burst_sz)) + burst_sz = num_to_process - dequeued; + + rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz); + if (test_vector.op_type != RTE_BBDEV_OP_NONE) + copy_reference_enc_op(ops_enq, burst_sz, dequeued, + bufs->inputs, + bufs->hard_outputs, + ref_op); + + /* Start time measurment for enqueue function offload latency */ + enq_start_time = rte_rdtsc(); + do { + enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id, + &ops_enq[enq], burst_sz - enq); + } while (unlikely(burst_sz != enq)); + *enq_total_time += rte_rdtsc() - enq_start_time; + + /* ensure enqueue has been completed */ + rte_delay_ms(10); + + /* Start time measurment for dequeue function offload latency */ + deq_start_time = rte_rdtsc(); + do { + deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id, + &ops_deq[deq], burst_sz - deq); + } while (unlikely(burst_sz != deq)); + *deq_total_time += rte_rdtsc() - deq_start_time; + + rte_bbdev_enc_op_free_bulk(ops_enq, deq); + dequeued += deq; + } + + return i; +} + +static int +offload_latency_test(struct active_device *ad, + struct test_op_params *op_params) +{ + int iter; + uint64_t enq_total_time = 0, deq_total_time = 0; + uint16_t burst_sz = op_params->burst_sz; + const uint16_t num_to_process = op_params->num_to_process; + const enum rte_bbdev_op_type op_type = test_vector.op_type; + const uint16_t queue_id = ad->queue_ids[0]; + struct test_buffers *bufs = NULL; + struct rte_bbdev_info info; + const char *op_type_str; + + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), + "BURST_SIZE should be <= %u", MAX_BURST); + + rte_bbdev_info_get(ad->dev_id, &info); + bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id]; + + op_type_str = rte_bbdev_op_type_str(op_type); + 
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type); + + printf( + "Offload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n", + info.dev_name, burst_sz, num_to_process, op_type_str); + + if (op_type == RTE_BBDEV_OP_TURBO_DEC) + iter = offload_latency_test_dec(op_params->mp, bufs, + op_params->ref_dec_op, ad->dev_id, queue_id, + num_to_process, burst_sz, &enq_total_time, + &deq_total_time); + else + iter = offload_latency_test_enc(op_params->mp, bufs, + op_params->ref_enc_op, ad->dev_id, queue_id, + num_to_process, burst_sz, &enq_total_time, + &deq_total_time); + + if (iter <= 0) + return TEST_FAILED; + + printf("\tenq offload avg. latency: %lg cycles, %lg us\n", + (double)enq_total_time / (double)iter, + (double)(enq_total_time * 1000000) / (double)iter / + (double)rte_get_tsc_hz()); + + printf("\tdeq offload avg. latency: %lg cycles, %lg us\n", + (double)deq_total_time / (double)iter, + (double)(deq_total_time * 1000000) / (double)iter / + (double)rte_get_tsc_hz()); + + return TEST_SUCCESS; +} + +static int +offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id, + const uint16_t num_to_process, uint16_t burst_sz, + uint64_t *deq_total_time) +{ + int i, deq_total; + struct rte_bbdev_dec_op *ops[MAX_BURST]; + uint64_t deq_start_time; + + /* Test deq offload latency from an empty queue */ + deq_start_time = rte_rdtsc_precise(); + for (i = 0, deq_total = 0; deq_total < num_to_process; + ++i, deq_total += burst_sz) { + if (unlikely(num_to_process - deq_total < burst_sz)) + burst_sz = num_to_process - deq_total; + rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz); + } + *deq_total_time = rte_rdtsc_precise() - deq_start_time; + + return i; +} + +static int +offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id, + const uint16_t num_to_process, uint16_t burst_sz, + uint64_t *deq_total_time) +{ + int i, deq_total; + struct rte_bbdev_enc_op *ops[MAX_BURST]; + uint64_t deq_start_time; + + /* Test deq 
offload latency from an empty queue */ + deq_start_time = rte_rdtsc_precise(); + for (i = 0, deq_total = 0; deq_total < num_to_process; + ++i, deq_total += burst_sz) { + if (unlikely(num_to_process - deq_total < burst_sz)) + burst_sz = num_to_process - deq_total; + rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz); + } + *deq_total_time = rte_rdtsc_precise() - deq_start_time; + + return i; +} + +static int +offload_latency_empty_q_test(struct active_device *ad, + struct test_op_params *op_params) +{ + int iter; + uint64_t deq_total_time = 0; + uint16_t burst_sz = op_params->burst_sz; + const uint16_t num_to_process = op_params->num_to_process; + const enum rte_bbdev_op_type op_type = test_vector.op_type; + const uint16_t queue_id = ad->queue_ids[0]; + struct rte_bbdev_info info; + const char *op_type_str; + + TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), + "BURST_SIZE should be <= %u", MAX_BURST); + + rte_bbdev_info_get(ad->dev_id, &info); + + op_type_str = rte_bbdev_op_type_str(op_type); + TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type); + + printf( + "Offload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n", + info.dev_name, burst_sz, num_to_process, op_type_str); + + if (op_type == RTE_BBDEV_OP_TURBO_DEC) + iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id, + num_to_process, burst_sz, &deq_total_time); + else + iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id, + num_to_process, burst_sz, &deq_total_time); + + if (iter <= 0) + return TEST_FAILED; + + printf("\tempty deq offload avg. 
latency: %lg cycles, %lg us\n", + (double)deq_total_time / (double)iter, + (double)(deq_total_time * 1000000) / (double)iter / + (double)rte_get_tsc_hz()); + + return TEST_SUCCESS; +} + +static int +throughput_tc(void) +{ + return run_test_case(throughput_test); +} + +static int +offload_latency_tc(void) +{ + return run_test_case(offload_latency_test); +} + +static int +offload_latency_empty_q_tc(void) +{ + return run_test_case(offload_latency_empty_q_test); +} + +static int +operation_latency_tc(void) +{ + return run_test_case(operation_latency_test); +} + +static int +interrupt_tc(void) +{ + return run_test_case(throughput_test); +} + +static struct unit_test_suite bbdev_throughput_testsuite = { + .suite_name = "BBdev Throughput Tests", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(ut_setup, ut_teardown, throughput_tc), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static struct unit_test_suite bbdev_validation_testsuite = { + .suite_name = "BBdev Validation Tests", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(ut_setup, ut_teardown, operation_latency_tc), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static struct unit_test_suite bbdev_latency_testsuite = { + .suite_name = "BBdev Latency Tests", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_tc), + TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc), + TEST_CASE_ST(ut_setup, ut_teardown, operation_latency_tc), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static struct unit_test_suite bbdev_interrupt_testsuite = { + .suite_name = "BBdev Interrupt Tests", + .setup = interrupt_testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(ut_setup, ut_teardown, interrupt_tc), + TEST_CASES_END() /**< NULL 
terminate unit test array */ + } +}; + +REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite); +REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite); +REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite); +REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite); diff --git a/app/test-bbdev/test_bbdev_vector.c b/app/test-bbdev/test_bbdev_vector.c new file mode 100644 index 00000000..addef057 --- /dev/null +++ b/app/test-bbdev/test_bbdev_vector.c @@ -0,0 +1,937 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifdef RTE_EXEC_ENV_BSDAPP + #define _WITH_GETLINE +#endif +#include <stdio.h> +#include <stdbool.h> +#include <rte_malloc.h> + +#include "test_bbdev_vector.h" + +#define VALUE_DELIMITER "," +#define ENTRY_DELIMITER "=" + +const char *op_data_prefixes[] = { + "input", + "soft_output", + "hard_output", +}; + +/* trim leading and trailing spaces */ +static void +trim_space(char *str) +{ + char *start, *end; + + for (start = str; *start; start++) { + if (!isspace((unsigned char) start[0])) + break; + } + + for (end = start + strlen(start); end > start + 1; end--) { + if (!isspace((unsigned char) end[-1])) + break; + } + + *end = 0; + + /* Shift from "start" to the beginning of the string */ + if (start > str) + memmove(str, start, (end - start) + 1); +} + +static bool +starts_with(const char *str, const char *pre) +{ + return strncmp(pre, str, strlen(pre)) == 0; +} + +/* tokenization test values separated by a comma */ +static int +parse_values(char *tokens, uint32_t **data, uint32_t *data_length) +{ + uint32_t n_tokens = 0; + uint32_t data_size = 32; + + uint32_t *values, *values_resized; + char *tok, *error = NULL; + + tok = strtok(tokens, VALUE_DELIMITER); + if (tok == NULL) + return -1; + + values = (uint32_t *) + rte_zmalloc(NULL, sizeof(uint32_t) * data_size, 0); + if (values == NULL) + return -1; + + while (tok != NULL) { + values_resized = NULL; + + if (n_tokens >= data_size) { + data_size 
*= 2; + + values_resized = (uint32_t *) rte_realloc(values, + sizeof(uint32_t) * data_size, 0); + if (values_resized == NULL) { + rte_free(values); + return -1; + } + values = values_resized; + } + + values[n_tokens] = (uint32_t) strtoul(tok, &error, 0); + if ((error == NULL) || (*error != '\0')) { + printf("Failed with convert '%s'\n", tok); + rte_free(values); + return -1; + } + + *data_length = *data_length + (strlen(tok) - strlen("0x"))/2; + + tok = strtok(NULL, VALUE_DELIMITER); + if (tok == NULL) + break; + + n_tokens++; + } + + values_resized = (uint32_t *) rte_realloc(values, + sizeof(uint32_t) * (n_tokens + 1), 0); + + if (values_resized == NULL) { + rte_free(values); + return -1; + } + + *data = values_resized; + + return 0; +} + +/* convert turbo decoder flag from string to unsigned long int*/ +static int +op_decoder_flag_strtoul(char *token, uint32_t *op_flag_value) +{ + if (!strcmp(token, "RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE")) + *op_flag_value = RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE; + else if (!strcmp(token, "RTE_BBDEV_TURBO_CRC_TYPE_24B")) + *op_flag_value = RTE_BBDEV_TURBO_CRC_TYPE_24B; + else if (!strcmp(token, "RTE_BBDEV_TURBO_EQUALIZER")) + *op_flag_value = RTE_BBDEV_TURBO_EQUALIZER; + else if (!strcmp(token, "RTE_BBDEV_TURBO_SOFT_OUT_SATURATE")) + *op_flag_value = RTE_BBDEV_TURBO_SOFT_OUT_SATURATE; + else if (!strcmp(token, "RTE_BBDEV_TURBO_HALF_ITERATION_EVEN")) + *op_flag_value = RTE_BBDEV_TURBO_HALF_ITERATION_EVEN; + else if (!strcmp(token, "RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH")) + *op_flag_value = RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH; + else if (!strcmp(token, "RTE_BBDEV_TURBO_SOFT_OUTPUT")) + *op_flag_value = RTE_BBDEV_TURBO_SOFT_OUTPUT; + else if (!strcmp(token, "RTE_BBDEV_TURBO_EARLY_TERMINATION")) + *op_flag_value = RTE_BBDEV_TURBO_EARLY_TERMINATION; + else if (!strcmp(token, "RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN")) + *op_flag_value = RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN; + else if (!strcmp(token, "RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN")) + 
*op_flag_value = RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN; + else if (!strcmp(token, "RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT")) + *op_flag_value = RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT; + else if (!strcmp(token, "RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT")) + *op_flag_value = RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT; + else if (!strcmp(token, "RTE_BBDEV_TURBO_MAP_DEC")) + *op_flag_value = RTE_BBDEV_TURBO_MAP_DEC; + else if (!strcmp(token, "RTE_BBDEV_TURBO_DEC_SCATTER_GATHER")) + *op_flag_value = RTE_BBDEV_TURBO_DEC_SCATTER_GATHER; + else { + printf("The given value is not a turbo decoder flag\n"); + return -1; + } + + return 0; +} + +/* convert turbo encoder flag from string to unsigned long int*/ +static int +op_encoder_flag_strtoul(char *token, uint32_t *op_flag_value) +{ + if (!strcmp(token, "RTE_BBDEV_TURBO_RV_INDEX_BYPASS")) + *op_flag_value = RTE_BBDEV_TURBO_RV_INDEX_BYPASS; + else if (!strcmp(token, "RTE_BBDEV_TURBO_RATE_MATCH")) + *op_flag_value = RTE_BBDEV_TURBO_RATE_MATCH; + else if (!strcmp(token, "RTE_BBDEV_TURBO_CRC_24B_ATTACH")) + *op_flag_value = RTE_BBDEV_TURBO_CRC_24B_ATTACH; + else if (!strcmp(token, "RTE_BBDEV_TURBO_CRC_24A_ATTACH")) + *op_flag_value = RTE_BBDEV_TURBO_CRC_24A_ATTACH; + else if (!strcmp(token, "RTE_BBDEV_TURBO_ENC_SCATTER_GATHER")) + *op_flag_value = RTE_BBDEV_TURBO_ENC_SCATTER_GATHER; + else { + printf("The given value is not a turbo encoder flag\n"); + return -1; + } + + return 0; +} + +/* tokenization turbo decoder/encoder flags values separated by a comma */ +static int +parse_turbo_flags(char *tokens, uint32_t *op_flags, + enum rte_bbdev_op_type op_type) +{ + char *tok = NULL; + uint32_t op_flag_value = 0; + + tok = strtok(tokens, VALUE_DELIMITER); + if (tok == NULL) + return -1; + + while (tok != NULL) { + trim_space(tok); + if (op_type == RTE_BBDEV_OP_TURBO_DEC) { + if (op_decoder_flag_strtoul(tok, &op_flag_value) == -1) + return -1; + } else if (op_type == RTE_BBDEV_OP_TURBO_ENC) { + if (op_encoder_flag_strtoul(tok, &op_flag_value) == 
-1) + return -1; + } else { + return -1; + } + + *op_flags = *op_flags | op_flag_value; + + tok = strtok(NULL, VALUE_DELIMITER); + if (tok == NULL) + break; + } + + return 0; +} + +/* convert turbo encoder/decoder op_type from string to enum*/ +static int +op_turbo_type_strtol(char *token, enum rte_bbdev_op_type *op_type) +{ + trim_space(token); + if (!strcmp(token, "RTE_BBDEV_OP_TURBO_DEC")) + *op_type = RTE_BBDEV_OP_TURBO_DEC; + else if (!strcmp(token, "RTE_BBDEV_OP_TURBO_ENC")) + *op_type = RTE_BBDEV_OP_TURBO_ENC; + else if (!strcmp(token, "RTE_BBDEV_OP_NONE")) + *op_type = RTE_BBDEV_OP_NONE; + else { + printf("Not valid turbo op_type: '%s'\n", token); + return -1; + } + + return 0; +} + +/* tokenization expected status values separated by a comma */ +static int +parse_expected_status(char *tokens, int *status, enum rte_bbdev_op_type op_type) +{ + char *tok = NULL; + bool status_ok = false; + + tok = strtok(tokens, VALUE_DELIMITER); + if (tok == NULL) + return -1; + + while (tok != NULL) { + trim_space(tok); + if (!strcmp(tok, "OK")) + status_ok = true; + else if (!strcmp(tok, "DMA")) + *status = *status | (1 << RTE_BBDEV_DRV_ERROR); + else if (!strcmp(tok, "FCW")) + *status = *status | (1 << RTE_BBDEV_DATA_ERROR); + else if (!strcmp(tok, "CRC")) { + if (op_type == RTE_BBDEV_OP_TURBO_DEC) + *status = *status | (1 << RTE_BBDEV_CRC_ERROR); + else { + printf( + "CRC is only a valid value for turbo decoder\n"); + return -1; + } + } else { + printf("Not valid status: '%s'\n", tok); + return -1; + } + + tok = strtok(NULL, VALUE_DELIMITER); + if (tok == NULL) + break; + } + + if (status_ok && *status != 0) { + printf( + "Not valid status values. 
Cannot be OK and ERROR at the same time.\n"); + return -1; + } + + return 0; +} + +/* parse ops data entry (there can be more than 1 input entry, each will be + * contained in a separate op_data_buf struct) + */ +static int +parse_data_entry(const char *key_token, char *token, + struct test_bbdev_vector *vector, enum op_data_type type, + const char *prefix) +{ + int ret; + uint32_t data_length = 0; + uint32_t *data = NULL; + unsigned int id; + struct op_data_buf *op_data; + unsigned int *nb_ops; + + if (type >= DATA_NUM_TYPES) { + printf("Unknown op type: %d!\n", type); + return -1; + } + + op_data = vector->entries[type].segments; + nb_ops = &vector->entries[type].nb_segments; + + if (*nb_ops >= RTE_BBDEV_MAX_CODE_BLOCKS) { + printf("Too many segments (code blocks defined): %u, max %d!\n", + *nb_ops, RTE_BBDEV_MAX_CODE_BLOCKS); + return -1; + } + + if (sscanf(key_token + strlen(prefix), "%u", &id) != 1) { + printf("Missing ID of %s\n", prefix); + return -1; + } + if (id != *nb_ops) { + printf( + "Please order data entries sequentially, i.e. 
%s0, %s1, ...\n", + prefix, prefix); + return -1; + } + + /* Clear new op data struct */ + memset(op_data + *nb_ops, 0, sizeof(struct op_data_buf)); + + ret = parse_values(token, &data, &data_length); + if (!ret) { + op_data[*nb_ops].addr = data; + op_data[*nb_ops].length = data_length; + ++(*nb_ops); + } + + return ret; +} + +/* parses turbo decoder parameters and assigns to global variable */ +static int +parse_decoder_params(const char *key_token, char *token, + struct test_bbdev_vector *vector) +{ + int ret = 0, status = 0; + uint32_t op_flags = 0; + char *err = NULL; + + struct rte_bbdev_op_turbo_dec *turbo_dec = &vector->turbo_dec; + + /* compare keys */ + if (starts_with(key_token, op_data_prefixes[DATA_INPUT])) + ret = parse_data_entry(key_token, token, vector, + DATA_INPUT, op_data_prefixes[DATA_INPUT]); + + else if (starts_with(key_token, op_data_prefixes[DATA_SOFT_OUTPUT])) + ret = parse_data_entry(key_token, token, vector, + DATA_SOFT_OUTPUT, + op_data_prefixes[DATA_SOFT_OUTPUT]); + + else if (starts_with(key_token, op_data_prefixes[DATA_HARD_OUTPUT])) + ret = parse_data_entry(key_token, token, vector, + DATA_HARD_OUTPUT, + op_data_prefixes[DATA_HARD_OUTPUT]); + else if (!strcmp(key_token, "e")) { + vector->mask |= TEST_BBDEV_VF_E; + turbo_dec->cb_params.e = (uint32_t) strtoul(token, &err, 0); + } else if (!strcmp(key_token, "ea")) { + vector->mask |= TEST_BBDEV_VF_EA; + turbo_dec->tb_params.ea = (uint32_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "eb")) { + vector->mask |= TEST_BBDEV_VF_EB; + turbo_dec->tb_params.eb = (uint32_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "k")) { + vector->mask |= TEST_BBDEV_VF_K; + turbo_dec->cb_params.k = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? 
-1 : 0; + } else if (!strcmp(key_token, "k_pos")) { + vector->mask |= TEST_BBDEV_VF_K_POS; + turbo_dec->tb_params.k_pos = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "k_neg")) { + vector->mask |= TEST_BBDEV_VF_K_NEG; + turbo_dec->tb_params.k_neg = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "c")) { + vector->mask |= TEST_BBDEV_VF_C; + turbo_dec->tb_params.c = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "c_neg")) { + vector->mask |= TEST_BBDEV_VF_C_NEG; + turbo_dec->tb_params.c_neg = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "cab")) { + vector->mask |= TEST_BBDEV_VF_CAB; + turbo_dec->tb_params.cab = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "rv_index")) { + vector->mask |= TEST_BBDEV_VF_RV_INDEX; + turbo_dec->rv_index = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "iter_max")) { + vector->mask |= TEST_BBDEV_VF_ITER_MAX; + turbo_dec->iter_max = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "iter_min")) { + vector->mask |= TEST_BBDEV_VF_ITER_MIN; + turbo_dec->iter_min = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "expected_iter_count")) { + vector->mask |= TEST_BBDEV_VF_EXPECTED_ITER_COUNT; + turbo_dec->iter_count = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? 
-1 : 0; + } else if (!strcmp(key_token, "ext_scale")) { + vector->mask |= TEST_BBDEV_VF_EXT_SCALE; + turbo_dec->ext_scale = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "num_maps")) { + vector->mask |= TEST_BBDEV_VF_NUM_MAPS; + turbo_dec->num_maps = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "code_block_mode")) { + vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE; + turbo_dec->code_block_mode = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "op_flags")) { + vector->mask |= TEST_BBDEV_VF_OP_FLAGS; + ret = parse_turbo_flags(token, &op_flags, + vector->op_type); + if (!ret) + turbo_dec->op_flags = op_flags; + } else if (!strcmp(key_token, "expected_status")) { + vector->mask |= TEST_BBDEV_VF_EXPECTED_STATUS; + ret = parse_expected_status(token, &status, vector->op_type); + if (!ret) + vector->expected_status = status; + } else { + printf("Not valid dec key: '%s'\n", key_token); + return -1; + } + + if (ret != 0) { + printf("Failed with convert '%s\t%s'\n", key_token, token); + return -1; + } + + return 0; +} + +/* parses turbo encoder parameters and assigns to global variable */ +static int +parse_encoder_params(const char *key_token, char *token, + struct test_bbdev_vector *vector) +{ + int ret = 0, status = 0; + uint32_t op_flags = 0; + char *err = NULL; + + + struct rte_bbdev_op_turbo_enc *turbo_enc = &vector->turbo_enc; + + if (starts_with(key_token, op_data_prefixes[DATA_INPUT])) + ret = parse_data_entry(key_token, token, vector, + DATA_INPUT, op_data_prefixes[DATA_INPUT]); + else if (starts_with(key_token, "output")) + ret = parse_data_entry(key_token, token, vector, + DATA_HARD_OUTPUT, "output"); + else if (!strcmp(key_token, "e")) { + vector->mask |= TEST_BBDEV_VF_E; + turbo_enc->cb_params.e = (uint32_t) strtoul(token, &err, 0); + ret = ((err == 
NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "ea")) { + vector->mask |= TEST_BBDEV_VF_EA; + turbo_enc->tb_params.ea = (uint32_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "eb")) { + vector->mask |= TEST_BBDEV_VF_EB; + turbo_enc->tb_params.eb = (uint32_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "k")) { + vector->mask |= TEST_BBDEV_VF_K; + turbo_enc->cb_params.k = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "k_neg")) { + vector->mask |= TEST_BBDEV_VF_K_NEG; + turbo_enc->tb_params.k_neg = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "k_pos")) { + vector->mask |= TEST_BBDEV_VF_K_POS; + turbo_enc->tb_params.k_pos = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "c_neg")) { + vector->mask |= TEST_BBDEV_VF_C_NEG; + turbo_enc->tb_params.c_neg = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "c")) { + vector->mask |= TEST_BBDEV_VF_C; + turbo_enc->tb_params.c = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "cab")) { + vector->mask |= TEST_BBDEV_VF_CAB; + turbo_enc->tb_params.cab = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "rv_index")) { + vector->mask |= TEST_BBDEV_VF_RV_INDEX; + turbo_enc->rv_index = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "ncb")) { + vector->mask |= TEST_BBDEV_VF_NCB; + turbo_enc->cb_params.ncb = (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? 
-1 : 0; + } else if (!strcmp(key_token, "ncb_neg")) { + vector->mask |= TEST_BBDEV_VF_NCB_NEG; + turbo_enc->tb_params.ncb_neg = + (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "ncb_pos")) { + vector->mask |= TEST_BBDEV_VF_NCB_POS; + turbo_enc->tb_params.ncb_pos = + (uint16_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "r")) { + vector->mask |= TEST_BBDEV_VF_R; + turbo_enc->tb_params.r = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "code_block_mode")) { + vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE; + turbo_enc->code_block_mode = (uint8_t) strtoul(token, &err, 0); + ret = ((err == NULL) || (*err != '\0')) ? -1 : 0; + } else if (!strcmp(key_token, "op_flags")) { + vector->mask |= TEST_BBDEV_VF_OP_FLAGS; + ret = parse_turbo_flags(token, &op_flags, + vector->op_type); + if (!ret) + turbo_enc->op_flags = op_flags; + } else if (!strcmp(key_token, "expected_status")) { + vector->mask |= TEST_BBDEV_VF_EXPECTED_STATUS; + ret = parse_expected_status(token, &status, vector->op_type); + if (!ret) + vector->expected_status = status; + } else { + printf("Not valid enc key: '%s'\n", key_token); + return -1; + } + + if (ret != 0) { + printf("Failed with convert '%s\t%s'\n", key_token, token); + return -1; + } + + return 0; +} + +/* checks the type of key and assigns data */ +static int +parse_entry(char *entry, struct test_bbdev_vector *vector) +{ + int ret = 0; + char *token, *key_token; + enum rte_bbdev_op_type op_type = RTE_BBDEV_OP_NONE; + + if (entry == NULL) { + printf("Expected entry value\n"); + return -1; + } + + /* get key */ + token = strtok(entry, ENTRY_DELIMITER); + key_token = token; + /* get values for key */ + token = strtok(NULL, ENTRY_DELIMITER); + + if (key_token == NULL || token == NULL) { + printf("Expected 'key = values' but was '%.40s'..\n", 
entry); + return -1; + } + trim_space(key_token); + + /* first key_token has to specify type of operation */ + if (vector->op_type == RTE_BBDEV_OP_NONE) { + if (!strcmp(key_token, "op_type")) { + ret = op_turbo_type_strtol(token, &op_type); + if (!ret) + vector->op_type = op_type; + return (!ret) ? 0 : -1; + } + printf("First key_token (%s) does not specify op_type\n", + key_token); + return -1; + } + + /* compare keys */ + if (vector->op_type == RTE_BBDEV_OP_TURBO_DEC) { + if (parse_decoder_params(key_token, token, vector) == -1) + return -1; + } else if (vector->op_type == RTE_BBDEV_OP_TURBO_ENC) { + if (parse_encoder_params(key_token, token, vector) == -1) + return -1; + } + + return 0; +} + +static int +check_decoder_segments(struct test_bbdev_vector *vector) +{ + unsigned char i; + struct rte_bbdev_op_turbo_dec *turbo_dec = &vector->turbo_dec; + + if (vector->entries[DATA_INPUT].nb_segments == 0) + return -1; + + for (i = 0; i < vector->entries[DATA_INPUT].nb_segments; i++) + if (vector->entries[DATA_INPUT].segments[i].addr == NULL) + return -1; + + if (vector->entries[DATA_HARD_OUTPUT].nb_segments == 0) + return -1; + + for (i = 0; i < vector->entries[DATA_HARD_OUTPUT].nb_segments; + i++) + if (vector->entries[DATA_HARD_OUTPUT].segments[i].addr == NULL) + return -1; + + if ((turbo_dec->op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT) && + (vector->entries[DATA_SOFT_OUTPUT].nb_segments == 0)) + return -1; + + for (i = 0; i < vector->entries[DATA_SOFT_OUTPUT].nb_segments; + i++) + if (vector->entries[DATA_SOFT_OUTPUT].segments[i].addr == NULL) + return -1; + + return 0; +} + +static int +check_decoder_llr_spec(struct test_bbdev_vector *vector) +{ + struct rte_bbdev_op_turbo_dec *turbo_dec = &vector->turbo_dec; + + /* Check input LLR sign formalism specification */ + if ((turbo_dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN) && + (turbo_dec->op_flags & + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)) { + printf( + "Both positive and negative LLR input flags were set!\n"); + return 
-1; + } + if (!(turbo_dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN) && + !(turbo_dec->op_flags & + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)) { + printf( + "WARNING: input LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n"); + turbo_dec->op_flags |= RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN; + } + + if (!(turbo_dec->op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)) + return 0; + + /* Check output LLR sign formalism specification */ + if ((turbo_dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT) && + (turbo_dec->op_flags & + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT)) { + printf( + "Both positive and negative LLR output flags were set!\n"); + return -1; + } + if (!(turbo_dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT) && + !(turbo_dec->op_flags & + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT)) { + printf( + "WARNING: soft output LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n"); + turbo_dec->op_flags |= + RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT; + } + + return 0; +} + +/* checks decoder parameters */ +static int +check_decoder(struct test_bbdev_vector *vector) +{ + struct rte_bbdev_op_turbo_dec *turbo_dec = &vector->turbo_dec; + const int mask = vector->mask; + + if (check_decoder_segments(vector) < 0) + return -1; + + if (check_decoder_llr_spec(vector) < 0) + return -1; + + /* Check which params were set */ + if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) { + printf( + "WARNING: code_block_mode was not specified in vector file and will be set to 1 (0 - TB Mode, 1 - CB mode)\n"); + turbo_dec->code_block_mode = 1; + } + if (turbo_dec->code_block_mode == 0) { + if (!(mask & TEST_BBDEV_VF_EA)) + printf( + "WARNING: ea was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_EB)) + printf( + "WARNING: eb was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_K_NEG)) + printf( + "WARNING: k_neg was not specified in vector file and will be set to 0\n"); + if 
(!(mask & TEST_BBDEV_VF_K_POS)) + printf( + "WARNING: k_pos was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_C_NEG)) + printf( + "WARNING: c_neg was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_C)) { + printf( + "WARNING: c was not specified in vector file and will be set to 1\n"); + turbo_dec->tb_params.c = 1; + } + if (!(mask & TEST_BBDEV_VF_CAB)) + printf( + "WARNING: cab was not specified in vector file and will be set to 0\n"); + } else { + if (!(mask & TEST_BBDEV_VF_E)) + printf( + "WARNING: e was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_K)) + printf( + "WARNING: k was not specified in vector file and will be set to 0\n"); + } + if (!(mask & TEST_BBDEV_VF_RV_INDEX)) + printf( + "WARNING: rv_index was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_ITER_MIN)) + printf( + "WARNING: iter_min was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_ITER_MAX)) + printf( + "WARNING: iter_max was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)) + printf( + "WARNING: expected_iter_count was not specified in vector file and iter_count will not be validated\n"); + if (!(mask & TEST_BBDEV_VF_EXT_SCALE)) + printf( + "WARNING: ext_scale was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_OP_FLAGS)) { + printf( + "WARNING: op_flags was not specified in vector file and capabilities will not be validated\n"); + turbo_dec->num_maps = 0; + } else if (!(turbo_dec->op_flags & RTE_BBDEV_TURBO_MAP_DEC) && + mask & TEST_BBDEV_VF_NUM_MAPS) { + printf( + "WARNING: RTE_BBDEV_TURBO_MAP_DEC was not set in vector file and num_maps will be set to 0\n"); + turbo_dec->num_maps = 0; + } + if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS)) + printf( + "WARNING: expected_status was not specified in vector file and will be 
set to 0\n"); + return 0; +} + +/* checks encoder parameters */ +static int +check_encoder(struct test_bbdev_vector *vector) +{ + unsigned char i; + const int mask = vector->mask; + + if (vector->entries[DATA_INPUT].nb_segments == 0) + return -1; + + for (i = 0; i < vector->entries[DATA_INPUT].nb_segments; i++) + if (vector->entries[DATA_INPUT].segments[i].addr == NULL) + return -1; + + if (vector->entries[DATA_HARD_OUTPUT].nb_segments == 0) + return -1; + + for (i = 0; i < vector->entries[DATA_HARD_OUTPUT].nb_segments; i++) + if (vector->entries[DATA_HARD_OUTPUT].segments[i].addr == NULL) + return -1; + + if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) { + printf( + "WARNING: code_block_mode was not specified in vector file and will be set to 1\n"); + vector->turbo_enc.code_block_mode = 1; + } + if (vector->turbo_enc.code_block_mode == 0) { + if (!(mask & TEST_BBDEV_VF_EA) && (vector->turbo_enc.op_flags & + RTE_BBDEV_TURBO_RATE_MATCH)) + printf( + "WARNING: ea was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_EB) && (vector->turbo_enc.op_flags & + RTE_BBDEV_TURBO_RATE_MATCH)) + printf( + "WARNING: eb was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_K_NEG)) + printf( + "WARNING: k_neg was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_K_POS)) + printf( + "WARNING: k_pos was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_C_NEG)) + printf( + "WARNING: c_neg was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_C)) { + printf( + "WARNING: c was not specified in vector file and will be set to 1\n"); + vector->turbo_enc.tb_params.c = 1; + } + if (!(mask & TEST_BBDEV_VF_CAB) && (vector->turbo_enc.op_flags & + RTE_BBDEV_TURBO_RATE_MATCH)) + printf( + "WARNING: cab was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_NCB_NEG)) + printf( + "WARNING: ncb_neg was 
not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_NCB_POS)) + printf( + "WARNING: ncb_pos was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_R)) + printf( + "WARNING: r was not specified in vector file and will be set to 0\n"); + } else { + if (!(mask & TEST_BBDEV_VF_E) && (vector->turbo_enc.op_flags & + RTE_BBDEV_TURBO_RATE_MATCH)) + printf( + "WARNING: e was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_K)) + printf( + "WARNING: k was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_NCB)) + printf( + "WARNING: ncb was not specified in vector file and will be set to 0\n"); + } + if (!(mask & TEST_BBDEV_VF_RV_INDEX)) + printf( + "WARNING: rv_index was not specified in vector file and will be set to 0\n"); + if (!(mask & TEST_BBDEV_VF_OP_FLAGS)) + printf( + "WARNING: op_flags was not specified in vector file and capabilities will not be validated\n"); + if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS)) + printf( + "WARNING: expected_status was not specified in vector file and will be set to 0\n"); + + return 0; +} + +static int +bbdev_check_vector(struct test_bbdev_vector *vector) +{ + if (vector->op_type == RTE_BBDEV_OP_TURBO_DEC) { + if (check_decoder(vector) == -1) + return -1; + } else if (vector->op_type == RTE_BBDEV_OP_TURBO_ENC) { + if (check_encoder(vector) == -1) + return -1; + } else if (vector->op_type != RTE_BBDEV_OP_NONE) { + printf("Vector was not filled\n"); + return -1; + } + + return 0; +} + +int +test_bbdev_vector_read(const char *filename, + struct test_bbdev_vector *vector) +{ + int ret = 0; + size_t len = 0; + + FILE *fp = NULL; + char *line = NULL; + char *entry = NULL; + + fp = fopen(filename, "r"); + if (fp == NULL) { + printf("File %s does not exist\n", filename); + return -1; + } + + while (getline(&line, &len, fp) != -1) { + + /* ignore comments and new lines */ + if (line[0] == '#' || line[0] == '/' || 
line[0] == '\n' + || line[0] == '\r') + continue; + + trim_space(line); + + /* buffer for multiline */ + entry = realloc(entry, strlen(line) + 1); + if (entry == NULL) { + printf("Fail to realloc %zu bytes\n", strlen(line) + 1); + ret = -ENOMEM; + goto exit; + } + + memset(entry, 0, strlen(line) + 1); + strncpy(entry, line, strlen(line)); + + /* check if entry ends with , or = */ + if (entry[strlen(entry) - 1] == ',' + || entry[strlen(entry) - 1] == '=') { + while (getline(&line, &len, fp) != -1) { + trim_space(line); + + /* extend entry about length of new line */ + char *entry_extended = realloc(entry, + strlen(line) + + strlen(entry) + 1); + + if (entry_extended == NULL) { + printf("Fail to allocate %zu bytes\n", + strlen(line) + + strlen(entry) + 1); + ret = -ENOMEM; + goto exit; + } + + entry = entry_extended; + strncat(entry, line, strlen(line)); + + if (entry[strlen(entry) - 1] != ',') + break; + } + } + ret = parse_entry(entry, vector); + if (ret != 0) { + printf("An error occurred while parsing!\n"); + goto exit; + } + } + ret = bbdev_check_vector(vector); + if (ret != 0) + printf("An error occurred while checking!\n"); + +exit: + fclose(fp); + free(line); + free(entry); + + return ret; +} diff --git a/app/test-bbdev/test_bbdev_vector.h b/app/test-bbdev/test_bbdev_vector.h new file mode 100644 index 00000000..476aae13 --- /dev/null +++ b/app/test-bbdev/test_bbdev_vector.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef TEST_BBDEV_VECTOR_H_ +#define TEST_BBDEV_VECTOR_H_ + +#include <rte_bbdev_op.h> + +/* Flags which are set when specific parameter is define in vector file */ +enum { + TEST_BBDEV_VF_E = (1ULL << 0), + TEST_BBDEV_VF_EA = (1ULL << 1), + TEST_BBDEV_VF_EB = (1ULL << 2), + TEST_BBDEV_VF_K = (1ULL << 3), + TEST_BBDEV_VF_K_NEG = (1ULL << 4), + TEST_BBDEV_VF_K_POS = (1ULL << 5), + TEST_BBDEV_VF_C_NEG = (1ULL << 6), + TEST_BBDEV_VF_C = (1ULL << 7), + TEST_BBDEV_VF_CAB = (1ULL << 8), + 
TEST_BBDEV_VF_RV_INDEX = (1ULL << 9), + TEST_BBDEV_VF_ITER_MAX = (1ULL << 10), + TEST_BBDEV_VF_ITER_MIN = (1ULL << 11), + TEST_BBDEV_VF_EXPECTED_ITER_COUNT = (1ULL << 12), + TEST_BBDEV_VF_EXT_SCALE = (1ULL << 13), + TEST_BBDEV_VF_NUM_MAPS = (1ULL << 14), + TEST_BBDEV_VF_NCB = (1ULL << 15), + TEST_BBDEV_VF_NCB_NEG = (1ULL << 16), + TEST_BBDEV_VF_NCB_POS = (1ULL << 17), + TEST_BBDEV_VF_R = (1ULL << 18), + TEST_BBDEV_VF_CODE_BLOCK_MODE = (1ULL << 19), + TEST_BBDEV_VF_OP_FLAGS = (1ULL << 20), + TEST_BBDEV_VF_EXPECTED_STATUS = (1ULL << 21), +}; + +enum op_data_type { + DATA_INPUT = 0, + DATA_SOFT_OUTPUT, + DATA_HARD_OUTPUT, + DATA_NUM_TYPES, +}; + +struct op_data_buf { + uint32_t *addr; + uint32_t length; +}; + +struct op_data_entries { + struct op_data_buf segments[RTE_BBDEV_MAX_CODE_BLOCKS]; + unsigned int nb_segments; +}; + +struct test_bbdev_vector { + enum rte_bbdev_op_type op_type; + int expected_status; + int mask; + union { + struct rte_bbdev_op_turbo_dec turbo_dec; + struct rte_bbdev_op_turbo_enc turbo_enc; + }; + /* Additional storage for op data entries */ + struct op_data_entries entries[DATA_NUM_TYPES]; +}; + +/* fills test vector parameters based on test file */ +int +test_bbdev_vector_read(const char *filename, + struct test_bbdev_vector *vector); + + +#endif /* TEST_BBDEV_VECTOR_H_ */ diff --git a/app/test-bbdev/test_vectors/bbdev_vector_null.data b/app/test-bbdev/test_vectors/bbdev_vector_null.data new file mode 100644 index 00000000..c9a9abe9 --- /dev/null +++ b/app/test-bbdev/test_vectors/bbdev_vector_null.data @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_NONE
\ No newline at end of file diff --git a/app/test-bbdev/test_vectors/bbdev_vector_td_default.data b/app/test-bbdev/test_vectors/bbdev_vector_td_default.data new file mode 100644 index 00000000..b5c30278 --- /dev/null +++ b/app/test-bbdev/test_vectors/bbdev_vector_td_default.data @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_DEC + +input0 = +0x7f007f00, 0x7f817f00, 0x767f8100, 0x817f8100, 0x81008100, 0x7f818100, 0x81817f00, 0x7f818100, +0x86007f00, 0x7f818100, 0x887f8100, 0x81815200, 0x81008100, 0x817f7f00, 0x7f7f8100, 0x9e817f00, +0x7f7f0000, 0xb97f0000, 0xa7810000, 0x7f7f4a7f, 0x7f810000, 0x7f7f7f7f, 0x81720000, 0x40658181, +0x84810000, 0x817f0000, 0x81810000, 0x7f818181, 0x7f810000, 0x81815a81, 0x817f0000, 0x7a867f7b, +0x817f0000, 0x6b7f0000, 0x7f810000, 0x81818181, 0x817f0000, 0x7f7f817f, 0x7f7f0000, 0xab7f4f7f, +0x817f0000, 0x817f6c00, 0x81810000, 0x817f8181, 0x7f810000, 0x81816981, 0x7f7f0000, 0x007f8181 + +hard_output0 = +0xa7d6732e, 0x61 + +soft_output0 = +0x7f7f7f7f, 0x81817f7f, 0x7f817f81, 0x817f7f81, 0x81817f81, 0x81817f81, 0x8181817f, 0x7f81817f, +0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x817f8181, 0x81818181, 0x817f7f7f, 0x7f818181, 0x817f817f, +0x81818181, 0x81817f7f, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, +0x81817f7f, 0x81817f7f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f8181, 0x81817f81, 0x817f7f7f, +0x7f7f8181 + +e = +17280 + +k = +40 + +rv_index = +1 + +iter_max = +8 + +iter_min = +4 + +expected_iter_count = +8 + +ext_scale = +15 + +num_maps = +0 + +op_flags = +RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN, +RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT + +expected_status = +OK diff --git a/app/test-bbdev/test_vectors/bbdev_vector_te_default.data b/app/test-bbdev/test_vectors/bbdev_vector_te_default.data new file mode 100644 index 00000000..883a76cf --- /dev/null +++ 
b/app/test-bbdev/test_vectors/bbdev_vector_te_default.data @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +op_type = +RTE_BBDEV_OP_TURBO_ENC + +input0 = +0x11d2bcac, 0x4d + +output0 = +0xd2399179, 0x640eb999, 0x2cbaf577, 0xaf224ae2, 0x9d139927, 0xe6909b29, 0xa25b7f47, 0x2aa224ce, +0x79f2 + +e = +272 + +k = +40 + +ncb = +192 + +rv_index = +0 + +code_block_mode = +1 + +op_flags = +RTE_BBDEV_TURBO_RATE_MATCH + +expected_status = +OK diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile index c75d7ed1..3935aec4 100644 --- a/app/test-crypto-perf/Makefile +++ b/app/test-crypto-perf/Makefile @@ -1,32 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2016-2017 Intel Corporation. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2016-2017 Intel Corporation include $(RTE_SDK)/mk/rte.vars.mk diff --git a/app/test-crypto-perf/cperf.h b/app/test-crypto-perf/cperf.h index c9f7f817..db58228d 100644 --- a/app/test-crypto-perf/cperf.h +++ b/app/test-crypto-perf/cperf.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef _CPERF_ diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c index 23d30ca3..8f320099 100644 --- a/app/test-crypto-perf/cperf_ops.c +++ b/app/test-crypto-perf/cperf_ops.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <rte_cryptodev.h> @@ -41,7 +13,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, const struct cperf_options *options, const struct cperf_test_vector *test_vector __rte_unused, - uint16_t iv_offset __rte_unused) + uint16_t iv_offset __rte_unused, uint32_t *imix_idx) { uint16_t i; @@ -62,7 +34,12 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops, dst_buf_offset); /* cipher parameters */ - sym_op->cipher.data.length = options->test_buffer_size; + if (options->imix_distribution_count) { + sym_op->cipher.data.length = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + sym_op->cipher.data.length = options->test_buffer_size; sym_op->cipher.data.offset = 0; } @@ -75,7 +52,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, const struct cperf_options *options, const struct cperf_test_vector *test_vector __rte_unused, - uint16_t iv_offset __rte_unused) + uint16_t iv_offset __rte_unused, uint32_t *imix_idx) { uint16_t i; @@ -96,7 +73,12 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops, dst_buf_offset); /* auth parameters */ - sym_op->auth.data.length = options->test_buffer_size; + if (options->imix_distribution_count) { + sym_op->auth.data.length = + 
options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + sym_op->auth.data.length = options->test_buffer_size; sym_op->auth.data.offset = 0; } @@ -109,7 +91,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, const struct cperf_options *options, const struct cperf_test_vector *test_vector, - uint16_t iv_offset) + uint16_t iv_offset, uint32_t *imix_idx) { uint16_t i; @@ -130,12 +112,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops, dst_buf_offset); /* cipher parameters */ + if (options->imix_distribution_count) { + sym_op->cipher.data.length = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + sym_op->cipher.data.length = options->test_buffer_size; + if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 || options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) - sym_op->cipher.data.length = options->test_buffer_size << 3; - else - sym_op->cipher.data.length = options->test_buffer_size; + sym_op->cipher.data.length <<= 3; sym_op->cipher.data.offset = 0; } @@ -160,7 +147,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, const struct cperf_options *options, const struct cperf_test_vector *test_vector, - uint16_t iv_offset) + uint16_t iv_offset, uint32_t *imix_idx) { uint16_t i; @@ -225,12 +212,17 @@ cperf_set_ops_auth(struct rte_crypto_op **ops, } + if (options->imix_distribution_count) { + sym_op->auth.data.length = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + sym_op->auth.data.length = options->test_buffer_size; + if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 || options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) - sym_op->auth.data.length = options->test_buffer_size << 3; - else - 
sym_op->auth.data.length = options->test_buffer_size; + sym_op->auth.data.length <<= 3; sym_op->auth.data.offset = 0; } @@ -255,7 +247,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, const struct cperf_options *options, const struct cperf_test_vector *test_vector, - uint16_t iv_offset) + uint16_t iv_offset, uint32_t *imix_idx) { uint16_t i; @@ -276,12 +268,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops, dst_buf_offset); /* cipher parameters */ + if (options->imix_distribution_count) { + sym_op->cipher.data.length = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + sym_op->cipher.data.length = options->test_buffer_size; + if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 || options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) - sym_op->cipher.data.length = options->test_buffer_size << 3; - else - sym_op->cipher.data.length = options->test_buffer_size; + sym_op->cipher.data.length <<= 3; sym_op->cipher.data.offset = 0; @@ -321,12 +318,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops, rte_pktmbuf_iova_offset(buf, offset); } + if (options->imix_distribution_count) { + sym_op->auth.data.length = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + sym_op->auth.data.length = options->test_buffer_size; + if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 || options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) - sym_op->auth.data.length = options->test_buffer_size << 3; - else - sym_op->auth.data.length = options->test_buffer_size; + sym_op->auth.data.length <<= 3; sym_op->auth.data.offset = 0; } @@ -360,7 +362,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, const struct cperf_options *options, const struct 
cperf_test_vector *test_vector, - uint16_t iv_offset) + uint16_t iv_offset, uint32_t *imix_idx) { uint16_t i; /* AAD is placed after the IV */ @@ -384,7 +386,12 @@ cperf_set_ops_aead(struct rte_crypto_op **ops, dst_buf_offset); /* AEAD parameters */ - sym_op->aead.data.length = options->test_buffer_size; + if (options->imix_distribution_count) { + sym_op->aead.data.length = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + sym_op->aead.data.length = options->test_buffer_size; sym_op->aead.data.offset = 0; sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i], diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h index 94951cc3..29e109f2 100644 --- a/app/test-crypto-perf/cperf_ops.h +++ b/app/test-crypto-perf/cperf_ops.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef _CPERF_OPS_ @@ -51,7 +23,7 @@ typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, const struct cperf_options *options, const struct cperf_test_vector *test_vector, - uint16_t iv_offset); + uint16_t iv_offset, uint32_t *imix_idx); struct cperf_op_fns { cperf_sessions_create_t sess_create; diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h index da4fb47c..54a3ad5c 100644 --- a/app/test-crypto-perf/cperf_options.h +++ b/app/test-crypto-perf/cperf_options.h @@ -3,6 +3,7 @@ #define _CPERF_OPTIONS_ #include <rte_crypto.h> +#include <rte_cryptodev.h> #define CPERF_PTEST_TYPE ("ptest") #define CPERF_SILENT ("silent") @@ -13,6 +14,7 @@ #define CPERF_BUFFER_SIZE ("buffer-sz") #define CPERF_SEGMENT_SIZE ("segment-sz") #define CPERF_DESC_NB ("desc-nb") +#define CPERF_IMIX ("imix") #define CPERF_DEVTYPE ("devtype") #define CPERF_OPTYPE ("optype") @@ -73,6 +75,7 @@ struct cperf_options { uint32_t total_ops; uint32_t segment_sz; uint32_t test_buffer_size; + uint32_t *imix_buffer_sizes; uint32_t nb_descriptors; uint16_t nb_qps; @@ -102,7 +105,7 @@ struct cperf_options { uint16_t digest_sz; - char device_type[RTE_CRYPTODEV_NAME_LEN]; + char device_type[RTE_CRYPTODEV_NAME_MAX_LEN]; enum cperf_op_type op_type; char *test_file; @@ -122,6 +125,8 @@ 
struct cperf_options { /* pmd-cyclecount specific options */ uint32_t pmdcc_delay; + uint32_t imix_distribution_list[MAX_LIST]; + uint8_t imix_distribution_count; }; void diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c index ad43e84c..7a5aa06a 100644 --- a/app/test-crypto-perf/cperf_options_parsing.c +++ b/app/test-crypto-perf/cperf_options_parsing.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <getopt.h> @@ -57,6 +29,7 @@ usage(char *progname) " --total-ops N: set the number of total operations performed\n" " --burst-sz N: set the number of packets per burst\n" " --buffer-sz N: set the size of a single packet\n" + " --imix N: set the distribution of packet sizes\n" " --segment-sz N: set the size of the segment to use\n" " --desc-nb N: set number of descriptors for each crypto device\n" " --devtype TYPE: set crypto device type to use\n" @@ -243,6 +216,8 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max) char *token; uint32_t number; uint8_t count = 0; + uint32_t temp_min; + uint32_t temp_max; char *copy_arg = strdup(arg); @@ -261,8 +236,8 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max) goto err_list; list[count++] = number; - *min = number; - *max = number; + temp_min = number; + temp_max = number; } else goto err_list; @@ -283,14 +258,19 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max) list[count++] = number; - if (number < *min) - *min = number; - if (number > *max) - *max = number; + if (number < temp_min) + temp_min = number; + if (number > temp_max) + temp_max = number; token = strtok(NULL, ","); } + if (min) + *min = temp_min; + if (max) + *max = temp_max; + free(copy_arg); return count; @@ -387,6 +367,29 @@ parse_segment_sz(struct 
cperf_options *opts, const char *arg) } static int +parse_imix(struct cperf_options *opts, const char *arg) +{ + int ret; + + ret = parse_list(arg, opts->imix_distribution_list, + NULL, NULL); + if (ret < 0) { + RTE_LOG(ERR, USER1, "failed to parse imix distribution\n"); + return -1; + } + + opts->imix_distribution_count = ret; + + if (opts->imix_distribution_count <= 1) { + RTE_LOG(ERR, USER1, "imix distribution should have " + "at least two entries\n"); + return -1; + } + + return 0; +} + +static int parse_desc_nb(struct cperf_options *opts, const char *arg) { int ret = parse_uint32_t(&opts->nb_descriptors, arg); @@ -722,6 +725,7 @@ static struct option lgopts[] = { { CPERF_SEGMENT_SIZE, required_argument, 0, 0 }, { CPERF_DESC_NB, required_argument, 0, 0 }, + { CPERF_IMIX, required_argument, 0, 0 }, { CPERF_DEVTYPE, required_argument, 0, 0 }, { CPERF_OPTYPE, required_argument, 0, 0 }, @@ -786,6 +790,7 @@ cperf_options_default(struct cperf_options *opts) */ opts->segment_sz = 0; + opts->imix_distribution_count = 0; strncpy(opts->device_type, "crypto_aesni_mb", sizeof(opts->device_type)); opts->nb_qps = 1; @@ -835,6 +840,7 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts) { CPERF_OPTYPE, parse_op_type }, { CPERF_SESSIONLESS, parse_sessionless }, { CPERF_OUT_OF_PLACE, parse_out_of_place }, + { CPERF_IMIX, parse_imix }, { CPERF_TEST_FILE, parse_test_file }, { CPERF_TEST_NAME, parse_test_name }, { CPERF_CIPHER_ALGO, parse_cipher_algo }, @@ -973,6 +979,14 @@ cperf_options_check(struct cperf_options *options) return -EINVAL; } + if ((options->imix_distribution_count != 0) && + (options->imix_distribution_count != + options->buffer_size_count)) { + RTE_LOG(ERR, USER1, "IMIX distribution must have the same " + "number of buffer sizes\n"); + return -EINVAL; + } + if (options->test == CPERF_TEST_TYPE_VERIFY && options->test_file == NULL) { RTE_LOG(ERR, USER1, "Define path to the file with test" @@ -1025,6 +1039,13 @@ cperf_options_check(struct cperf_options 
*options) return -EINVAL; } + if (options->test == CPERF_TEST_TYPE_VERIFY && + options->imix_distribution_count > 0) { + RTE_LOG(ERR, USER1, "IMIX is not allowed when " + "using the verify test.\n"); + return -EINVAL; + } + if (options->op_type == CPERF_CIPHER_THEN_AUTH) { if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT && options->auth_op != diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c index 328744ef..21cb1c22 100644 --- a/app/test-crypto-perf/cperf_test_common.c +++ b/app/test-crypto-perf/cperf_test_common.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #include <rte_malloc.h> diff --git a/app/test-crypto-perf/cperf_test_common.h b/app/test-crypto-perf/cperf_test_common.h index 4cee7852..3ace0d2e 100644 --- a/app/test-crypto-perf/cperf_test_common.h +++ b/app/test-crypto-perf/cperf_test_common.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #ifndef _CPERF_TEST_COMMON_H_ diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c index ca2a4ba6..c9c98dc5 100644 --- a/app/test-crypto-perf/cperf_test_latency.c +++ b/app/test-crypto-perf/cperf_test_latency.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <rte_malloc.h> @@ -154,6 +126,7 @@ cperf_latency_test_runner(void *arg) struct cperf_latency_ctx *ctx = arg; uint16_t test_burst_size; uint8_t burst_size_idx = 0; + uint32_t imix_idx = 0; static int only_once; @@ -218,7 +191,7 @@ cperf_latency_test_runner(void *arg) burst_size) != 0) { RTE_LOG(ERR, USER1, "Failed to allocate more crypto operations " - "from the the crypto operation pool.\n" + "from the crypto operation pool.\n" "Consider increasing the pool size " "with --pool-sz\n"); return -1; @@ -228,7 +201,8 @@ cperf_latency_test_runner(void *arg) (ctx->populate_ops)(ops, ctx->src_buf_offset, ctx->dst_buf_offset, burst_size, ctx->sess, ctx->options, - ctx->test_vector, iv_offset); + ctx->test_vector, iv_offset, + &imix_idx); tsc_start = rte_rdtsc_precise(); diff --git a/app/test-crypto-perf/cperf_test_latency.h b/app/test-crypto-perf/cperf_test_latency.h index 1bbedb4e..d3fc3218 100644 --- a/app/test-crypto-perf/cperf_test_latency.h +++ b/app/test-crypto-perf/cperf_test_latency.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef _CPERF_LATENCY_ diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c index 9b41724a..8f761608 100644 --- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #include <stdbool.h> @@ -169,6 +141,7 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op, uint32_t iter_ops_needed = RTE_MIN(state->opts->nb_descriptors, iter_ops_left); uint32_t cur_iter_op; + uint32_t imix_idx = 0; for (cur_iter_op = 0; cur_iter_op < iter_ops_needed; cur_iter_op += test_burst_size) { @@ -181,7 +154,7 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op, burst_size) != 0) { RTE_LOG(ERR, USER1, "Failed to allocate more crypto operations " - "from the the crypto operation pool.\n" + "from the crypto operation pool.\n" "Consider increasing the pool size " "with --pool-sz\n"); return -1; @@ -193,7 +166,8 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op, state->ctx->dst_buf_offset, burst_size, state->ctx->sess, state->opts, - state->ctx->test_vector, iv_offset); + state->ctx->test_vector, iv_offset, + &imix_idx); #ifdef CPERF_LINEARIZATION_ENABLE /* Check if source mbufs require coalescing */ @@ -218,6 +192,7 @@ pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state, uint32_t iter_ops_needed, uint16_t test_burst_size) { uint32_t cur_iter_op; + uint32_t imix_idx = 0; for (cur_iter_op = 0; cur_iter_op < iter_ops_needed; cur_iter_op += test_burst_size) { @@ -230,7 +205,7 @@ pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state, burst_size) != 0) { RTE_LOG(ERR, USER1, "Failed to allocate more crypto operations " - "from the the crypto operation pool.\n" + "from the crypto operation pool.\n" "Consider increasing the pool size " "with --pool-sz\n"); return -1; @@ -242,7 +217,8 @@ pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state, state->ctx->dst_buf_offset, burst_size, state->ctx->sess, state->opts, - state->ctx->test_vector, iv_offset); + state->ctx->test_vector, iv_offset, + &imix_idx); } return 0; } diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.h 
b/app/test-crypto-perf/cperf_test_pmd_cyclecount.h index 93f0eae0..beb44199 100644 --- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.h +++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #ifndef _CPERF_TEST_PMD_CYCLECOUNT_H_ diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c index b84dc630..8766d6e9 100644 --- a/app/test-crypto-perf/cperf_test_throughput.c +++ b/app/test-crypto-perf/cperf_test_throughput.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <rte_malloc.h> @@ -120,6 +92,7 @@ cperf_throughput_test_runner(void *test_ctx) struct cperf_throughput_ctx *ctx = test_ctx; uint16_t test_burst_size; uint8_t burst_size_idx = 0; + uint32_t imix_idx = 0; static int only_once; @@ -182,7 +155,7 @@ cperf_throughput_test_runner(void *test_ctx) ops_needed) != 0) { RTE_LOG(ERR, USER1, "Failed to allocate more crypto operations " - "from the the crypto operation pool.\n" + "from the crypto operation pool.\n" "Consider increasing the pool size " "with --pool-sz\n"); return -1; @@ -193,7 +166,7 @@ cperf_throughput_test_runner(void *test_ctx) ctx->dst_buf_offset, ops_needed, ctx->sess, ctx->options, ctx->test_vector, - iv_offset); + iv_offset, &imix_idx); /** * When ops_needed is smaller than ops_enqd, the diff --git a/app/test-crypto-perf/cperf_test_throughput.h b/app/test-crypto-perf/cperf_test_throughput.h index 987d0c31..439ec8e5 100644 --- a/app/test-crypto-perf/cperf_test_throughput.h +++ b/app/test-crypto-perf/cperf_test_throughput.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef _CPERF_THROUGHPUT_ diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c index d4736f9e..26321d00 100644 --- a/app/test-crypto-perf/cperf_test_vector_parsing.c +++ b/app/test-crypto-perf/cperf_test_vector_parsing.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifdef RTE_EXEC_ENV_BSDAPP #define _WITH_GETLINE diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.h b/app/test-crypto-perf/cperf_test_vector_parsing.h index e3df98bd..247b1422 100644 --- a/app/test-crypto-perf/cperf_test_vector_parsing.h +++ b/app/test-crypto-perf/cperf_test_vector_parsing.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef APP_CRYPTO_PERF_CPERF_TEST_VECTOR_PARSING_H_ diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c index fa911ff6..907a995c 100644 --- a/app/test-crypto-perf/cperf_test_vectors.c +++ b/app/test-crypto-perf/cperf_test_vectors.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <rte_crypto.h> diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h index cb5d8284..6f10823e 100644 --- a/app/test-crypto-perf/cperf_test_vectors.h +++ b/app/test-crypto-perf/cperf_test_vectors.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef _CPERF_TEST_VECTRORS_ diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c index 6945c8b4..9134b921 100644 --- a/app/test-crypto-perf/cperf_test_verify.c +++ b/app/test-crypto-perf/cperf_test_verify.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <rte_malloc.h> @@ -264,6 +236,7 @@ cperf_verify_test_runner(void *test_ctx) uint64_t i; uint16_t ops_unused = 0; + uint32_t imix_idx = 0; struct rte_crypto_op *ops[ctx->options->max_burst_size]; struct rte_crypto_op *ops_processed[ctx->options->max_burst_size]; @@ -307,7 +280,7 @@ cperf_verify_test_runner(void *test_ctx) ops_needed) != 0) { RTE_LOG(ERR, USER1, "Failed to allocate more crypto operations " - "from the the crypto operation pool.\n" + "from the crypto operation pool.\n" "Consider increasing the pool size " "with --pool-sz\n"); return -1; @@ -317,7 +290,7 @@ cperf_verify_test_runner(void *test_ctx) (ctx->populate_ops)(ops, ctx->src_buf_offset, ctx->dst_buf_offset, ops_needed, ctx->sess, ctx->options, - ctx->test_vector, iv_offset); + ctx->test_vector, iv_offset, &imix_idx); /* Populate the mbuf with the test vector, for verification */ diff --git a/app/test-crypto-perf/cperf_test_verify.h b/app/test-crypto-perf/cperf_test_verify.h index e67b48d3..9f70ad87 100644 --- a/app/test-crypto-perf/cperf_test_verify.h +++ b/app/test-crypto-perf/cperf_test_verify.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #ifndef _CPERF_VERIFY_ diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c index 29373f5b..019d8359 100644 --- a/app/test-crypto-perf/main.c +++ b/app/test-crypto-perf/main.c @@ -1,38 +1,12 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <stdio.h> #include <unistd.h> +#include <rte_malloc.h> +#include <rte_random.h> #include <rte_eal.h> #include <rte_cryptodev.h> #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER @@ -536,13 +510,45 @@ main(int argc, char **argv) i++; } - /* Get first size from range or list */ - if (opts.inc_buffer_size != 0) - opts.test_buffer_size = opts.min_buffer_size; - else - opts.test_buffer_size = opts.buffer_size_list[0]; + if (opts.imix_distribution_count != 0) { + uint8_t buffer_size_count = opts.buffer_size_count; + uint16_t distribution_total[buffer_size_count]; + uint32_t op_idx; + uint32_t test_average_size = 0; + const uint32_t *buffer_size_list = opts.buffer_size_list; + const uint32_t *imix_distribution_list = opts.imix_distribution_list; + + opts.imix_buffer_sizes = rte_malloc(NULL, + sizeof(uint32_t) * opts.pool_sz, + 0); + /* + * Calculate accumulated distribution of + * probabilities per packet size + */ + distribution_total[0] = imix_distribution_list[0]; + for (i = 1; i < buffer_size_count; i++) + distribution_total[i] = imix_distribution_list[i] + + distribution_total[i-1]; + + /* Calculate a random sequence of packet sizes, based on distribution */ + for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) { + uint16_t random_number = rte_rand() % + distribution_total[buffer_size_count - 1]; + for (i = 0; i < buffer_size_count; i++) + if (random_number < distribution_total[i]) + break; + + opts.imix_buffer_sizes[op_idx] = buffer_size_list[i]; + } + + /* Calculate average buffer size for the IMIX distribution */ + for (i = 0; i < buffer_size_count; i++) + test_average_size += buffer_size_list[i] * + imix_distribution_list[i]; + + opts.test_buffer_size = test_average_size / + distribution_total[buffer_size_count - 1]; - while (opts.test_buffer_size <= opts.max_buffer_size) { i = 0; RTE_LCORE_FOREACH_SLAVE(lcore_id) { @@ -561,14 +567,43 @@ main(int argc, char **argv) 
rte_eal_wait_lcore(lcore_id); i++; } + } else { /* Get next size from range or list */ if (opts.inc_buffer_size != 0) - opts.test_buffer_size += opts.inc_buffer_size; - else { - if (++buffer_size_idx == opts.buffer_size_count) - break; - opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx]; + opts.test_buffer_size = opts.min_buffer_size; + else + opts.test_buffer_size = opts.buffer_size_list[0]; + + while (opts.test_buffer_size <= opts.max_buffer_size) { + i = 0; + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + + if (i == total_nb_qps) + break; + + rte_eal_remote_launch(cperf_testmap[opts.test].runner, + ctx[i], lcore_id); + i++; + } + i = 0; + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + + if (i == total_nb_qps) + break; + rte_eal_wait_lcore(lcore_id); + i++; + } + + /* Get next size from range or list */ + if (opts.inc_buffer_size != 0) + opts.test_buffer_size += opts.inc_buffer_size; + else { + if (++buffer_size_idx == opts.buffer_size_count) + break; + opts.test_buffer_size = + opts.buffer_size_list[buffer_size_idx]; + } } } @@ -597,8 +632,6 @@ err: if (i == total_nb_qps) break; - cdev_id = enabled_cdevs[i]; - if (ctx[i] && cperf_testmap[opts.test].destructor) cperf_testmap[opts.test].destructor(ctx[i]); i++; @@ -607,7 +640,7 @@ err: for (i = 0; i < nb_cryptodevs && i < RTE_CRYPTO_MAX_DEVS; i++) rte_cryptodev_stop(enabled_cdevs[i]); - + rte_free(opts.imix_buffer_sizes); free_test_vector(t_vec, &opts); printf("\n"); diff --git a/app/test-eventdev/Makefile b/app/test-eventdev/Makefile index dcb2ac47..cb659110 100644 --- a/app/test-eventdev/Makefile +++ b/app/test-eventdev/Makefile @@ -1,37 +1,12 @@ -# BSD LICENSE +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Cavium, Inc # -# Copyright(c) 2017 Cavium, Inc. All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Cavium, Inc nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
include $(RTE_SDK)/mk/rte.vars.mk APP = dpdk-test-eventdev +CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) @@ -51,4 +26,8 @@ SRCS-y += test_perf_common.c SRCS-y += test_perf_queue.c SRCS-y += test_perf_atq.c +SRCS-y += test_pipeline_common.c +SRCS-y += test_pipeline_queue.c +SRCS-y += test_pipeline_atq.c + include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h index 0fadab4a..21d3c103 100644 --- a/app/test-eventdev/evt_common.h +++ b/app/test-eventdev/evt_common.h @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #ifndef _EVT_COMMON_ @@ -94,41 +66,36 @@ evt_has_all_types_queue(uint8_t dev_id) } static inline int -evt_service_setup(uint8_t dev_id) +evt_service_setup(uint32_t service_id) { - uint32_t service_id; int32_t core_cnt; unsigned int lcore = 0; uint32_t core_array[RTE_MAX_LCORE]; uint8_t cnt; uint8_t min_cnt = UINT8_MAX; - if (evt_has_distributed_sched(dev_id)) - return 0; - if (!rte_service_lcore_count()) return -ENOENT; - if (!rte_event_dev_service_id_get(dev_id, &service_id)) { - core_cnt = rte_service_lcore_list(core_array, - RTE_MAX_LCORE); - if (core_cnt < 0) - return -ENOENT; - /* Get the core which has least number of services running. */ - while (core_cnt--) { - /* Reset default mapping */ - rte_service_map_lcore_set(service_id, - core_array[core_cnt], 0); - cnt = rte_service_lcore_count_services( - core_array[core_cnt]); - if (cnt < min_cnt) { - lcore = core_array[core_cnt]; - min_cnt = cnt; - } + core_cnt = rte_service_lcore_list(core_array, + RTE_MAX_LCORE); + if (core_cnt < 0) + return -ENOENT; + /* Get the core which has least number of services running. 
*/ + while (core_cnt--) { + /* Reset default mapping */ + rte_service_map_lcore_set(service_id, + core_array[core_cnt], 0); + cnt = rte_service_lcore_count_services( + core_array[core_cnt]); + if (cnt < min_cnt) { + lcore = core_array[core_cnt]; + min_cnt = cnt; } - if (rte_service_map_lcore_set(service_id, lcore, 1)) - return -ENOENT; } + if (rte_service_map_lcore_set(service_id, lcore, 1)) + return -ENOENT; + return 0; } diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c index 1c3a7fae..57bb9457 100644 --- a/app/test-eventdev/evt_main.c +++ b/app/test-eventdev/evt_main.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include <stdio.h> @@ -57,6 +29,9 @@ signal_handler(int signum) rte_eal_mp_wait_lcore(); + if (test->ops.test_result) + test->ops.test_result(test, &opt); + if (test->ops.eventdev_destroy) test->ops.eventdev_destroy(test, &opt); diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c index e2187dfc..9683b222 100644 --- a/app/test-eventdev/evt_options.c +++ b/app/test-eventdev/evt_options.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include <stdio.h> @@ -55,6 +27,7 @@ evt_options_default(struct evt_options *opt) opt->pool_sz = 16 * 1024; opt->wkr_deq_dep = 16; opt->nb_pkts = (1ULL << 26); /* do ~64M packets */ + opt->prod_type = EVT_PROD_TYPE_SYNT; } typedef int (*option_parser_t)(struct evt_options *opt, @@ -107,6 +80,13 @@ evt_parse_queue_priority(struct evt_options *opt, const char *arg __rte_unused) } static int +evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused) +{ + opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR; + return 0; +} + +static int evt_parse_test_name(struct evt_options *opt, const char *arg) { snprintf(opt->test_name, EVT_TEST_NAME_MAX_LEN, "%s", arg); @@ -189,6 +169,7 @@ usage(char *program) "\t--worker_deq_depth : dequeue depth of the worker\n" "\t--fwd_latency : perform fwd_latency measurement\n" "\t--queue_priority : enable queue priority\n" + "\t--prod_type_ethdev : use ethernet device as producer\n." 
); printf("available tests:\n"); evt_test_dump_names(); @@ -249,6 +230,7 @@ static struct option lgopts[] = { { EVT_SCHED_TYPE_LIST, 1, 0, 0 }, { EVT_FWD_LATENCY, 0, 0, 0 }, { EVT_QUEUE_PRIORITY, 0, 0, 0 }, + { EVT_PROD_ETHDEV, 0, 0, 0 }, { EVT_HELP, 0, 0, 0 }, { NULL, 0, 0, 0 } }; @@ -272,6 +254,7 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt) { EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list}, { EVT_FWD_LATENCY, evt_parse_fwd_latency}, { EVT_QUEUE_PRIORITY, evt_parse_queue_priority}, + { EVT_PROD_ETHDEV, evt_parse_eth_prod_type}, }; for (i = 0; i < RTE_DIM(parsermap); i++) { diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h index a9a91252..46d12222 100644 --- a/app/test-eventdev/evt_options.h +++ b/app/test-eventdev/evt_options.h @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #ifndef _EVT_OPTIONS_ @@ -58,8 +30,16 @@ #define EVT_SCHED_TYPE_LIST ("stlist") #define EVT_FWD_LATENCY ("fwd_latency") #define EVT_QUEUE_PRIORITY ("queue_priority") +#define EVT_PROD_ETHDEV ("prod_type_ethdev") #define EVT_HELP ("help") +enum evt_prod_type { + EVT_PROD_TYPE_NONE, + EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */ + EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. 
*/ + EVT_PROD_TYPE_MAX, +}; + struct evt_options { #define EVT_TEST_NAME_MAX_LEN 32 char test_name[EVT_TEST_NAME_MAX_LEN]; @@ -76,6 +56,7 @@ struct evt_options { uint8_t dev_id; uint32_t fwd_latency:1; uint32_t q_priority:1; + enum evt_prod_type prod_type; }; void evt_options_default(struct evt_options *opt); @@ -266,4 +247,24 @@ evt_dump_sched_type_list(struct evt_options *opt) evt_dump_end; } +#define EVT_PROD_MAX_NAME_LEN 50 +static inline void +evt_dump_producer_type(struct evt_options *opt) +{ + char name[EVT_PROD_MAX_NAME_LEN]; + + switch (opt->prod_type) { + default: + case EVT_PROD_TYPE_SYNT: + snprintf(name, EVT_PROD_MAX_NAME_LEN, + "Synthetic producer lcores"); + break; + case EVT_PROD_TYPE_ETH_RX_ADPTR: + snprintf(name, EVT_PROD_MAX_NAME_LEN, + "Ethdev Rx Adapter producers"); + break; + } + evt_dump("prod_type", "%s", name); +} + #endif /* _EVT_OPTIONS_ */ diff --git a/app/test-eventdev/evt_test.c b/app/test-eventdev/evt_test.c index 3a432233..72d6228b 100644 --- a/app/test-eventdev/evt_test.c +++ b/app/test-eventdev/evt_test.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc. 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include <stdio.h> diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h index 17bdd165..7477a325 100644 --- a/app/test-eventdev/evt_test.h +++ b/app/test-eventdev/evt_test.h @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #ifndef _EVT_TEST_ diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build new file mode 100644 index 00000000..7c373c87 --- /dev/null +++ b/app/test-eventdev/meson.build @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Cavium, Inc + +sources = files('evt_main.c', + 'evt_options.c', + 'evt_test.c', + 'parser.c', + 'test_order_common.c', + 'test_order_atq.c', + 'test_order_queue.c', + 'test_perf_common.c', + 'test_perf_atq.c', + 'test_perf_queue.c') + +dep_objs = [get_variable(get_option('default_library') + '_rte_eventdev')] +dep_objs += cc.find_library('execinfo', required: false) # BSD only + +link_libs = [] +if get_option('default_library') == 'static' + link_libs = dpdk_drivers +endif + +executable('dpdk-test-eventdev', + sources, + c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'], + link_whole: link_libs, + dependencies: dep_objs, + install_rpath: join_paths(get_option('prefix'), driver_install_path), + install: true) diff --git a/app/test-eventdev/parser.h b/app/test-eventdev/parser.h index 75a5a3b4..673ff22d 100644 --- 
a/app/test-eventdev/parser.h +++ b/app/test-eventdev/parser.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #ifndef __INCLUDE_PARSER_H__ diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c index 4ee0dea8..c57fbbfa 100644 --- a/app/test-eventdev/test_order_atq.c +++ b/app/test-eventdev/test_order_atq.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include <stdio.h> diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c index 7cfe7fac..8a342013 100644 --- a/app/test-eventdev/test_order_common.c +++ b/app/test-eventdev/test_order_common.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include "test_order_common.h" diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h index 57bc76e0..22a1cc83 100644 --- a/app/test-eventdev/test_order_common.h +++ b/app/test-eventdev/test_order_common.h @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #ifndef _TEST_ORDER_COMMON_ diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c index eef69a4c..f603a023 100644 --- a/app/test-eventdev/test_order_queue.c +++ b/app/test-eventdev/test_order_queue.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include <stdio.h> diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c index 0e9f2db0..b36b22a7 100644 --- a/app/test-eventdev/test_perf_atq.c +++ b/app/test-eventdev/test_perf_atq.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include "test_perf_common.h" @@ -38,7 +10,8 @@ static inline int atq_nb_event_queues(struct evt_options *opt) { /* nb_queues = number of producers */ - return evt_nr_active_lcores(opt->plcores); + return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? + rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores); } static inline __attribute__((always_inline)) void @@ -185,14 +158,32 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) { int ret; uint8_t queue; + uint8_t nb_queues; + uint8_t nb_ports; + struct rte_event_dev_info dev_info; + + nb_ports = evt_nr_active_lcores(opt->wlcores); + nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 : + evt_nr_active_lcores(opt->plcores); + + nb_queues = atq_nb_event_queues(opt); + + memset(&dev_info, 0, sizeof(struct rte_event_dev_info)); + ret = rte_event_dev_info_get(opt->dev_id, &dev_info); + if (ret) { + evt_err("failed to get eventdev info %d", opt->dev_id); + return ret; + } const struct rte_event_dev_config config = { - .nb_event_queues = atq_nb_event_queues(opt), - .nb_event_ports = perf_nb_event_ports(opt), - .nb_events_limit = 4096, + .nb_event_queues = nb_queues, + .nb_event_ports = nb_ports, + .nb_events_limit = dev_info.max_num_events, .nb_event_queue_flows = opt->nb_flows, - .nb_event_port_dequeue_depth = 128, - .nb_event_port_enqueue_depth = 128, + .nb_event_port_dequeue_depth = + dev_info.max_event_port_dequeue_depth, + .nb_event_port_enqueue_depth = + dev_info.max_event_port_enqueue_depth, }; ret = rte_event_dev_configure(opt->dev_id, &config); @@ -208,7 +199,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) .nb_atomic_order_sequences = opt->nb_flows, }; /* queue configurations */ - for (queue = 0; queue < atq_nb_event_queues(opt); queue++) { + for (queue = 0; queue < nb_queues; queue++) { ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf); if (ret) { evt_err("failed 
to setup queue=%d", queue); @@ -216,15 +207,29 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) } } - ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, - atq_nb_event_queues(opt)); + if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth) + opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth; + + /* port configuration */ + const struct rte_event_port_conf p_conf = { + .dequeue_depth = opt->wkr_deq_dep, + .enqueue_depth = dev_info.max_event_port_dequeue_depth, + .new_event_threshold = dev_info.max_num_events, + }; + + ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues, + &p_conf); if (ret) return ret; - ret = evt_service_setup(opt->dev_id); - if (ret) { - evt_err("No service lcore found to run event dev."); - return ret; + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } } ret = rte_event_dev_start(opt->dev_id); @@ -271,11 +276,13 @@ static const struct evt_test_ops perf_atq = { .opt_check = perf_atq_opt_check, .opt_dump = perf_atq_opt_dump, .test_setup = perf_test_setup, + .ethdev_setup = perf_ethdev_setup, .mempool_setup = perf_mempool_setup, .eventdev_setup = perf_atq_eventdev_setup, .launch_lcores = perf_atq_launch_lcores, .eventdev_destroy = perf_eventdev_destroy, .mempool_destroy = perf_mempool_destroy, + .ethdev_destroy = perf_ethdev_destroy, .test_result = perf_test_result, .test_destroy = perf_test_destroy, }; diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c index e77b4723..59fa0a49 100644 --- a/app/test-eventdev/test_perf_common.c +++ b/app/test-eventdev/test_perf_common.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include "test_perf_common.h" @@ -36,8 +8,20 @@ int perf_test_result(struct evt_test *test, struct evt_options *opt) { RTE_SET_USED(opt); + int i; + uint64_t total = 0; struct test_perf *t = evt_test_priv(test); + printf("Packet distribution across worker cores :\n"); + for (i = 0; i < t->nb_workers; i++) + total += t->worker[i].processed_pkts; + for (i = 0; i < t->nb_workers; i++) + printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:" + CLGRN" %3.2f\n"CLNRM, i, + t->worker[i].processed_pkts, + (((double)t->worker[i].processed_pkts)/total) + * 100); + return t->result; } @@ -88,6 +72,17 @@ perf_producer(void *arg) return 0; } +static int +perf_producer_wrapper(void *arg) +{ + struct prod_data *p = arg; + struct test_perf *t = p->t; + /* Launch the producer function only in case of synthetic producer. */ + if (t->opt->prod_type == EVT_PROD_TYPE_SYNT) + return perf_producer(arg); + return 0; +} + static inline uint64_t processed_pkts(struct test_perf *t) { @@ -142,8 +137,8 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt, if (!(opt->plcores[lcore_id])) continue; - ret = rte_eal_remote_launch(perf_producer, &t->prod[port_idx], - lcore_id); + ret = rte_eal_remote_launch(perf_producer_wrapper, + &t->prod[port_idx], lcore_id); if (ret) { evt_err("failed to launch perf_producer %d", lcore_id); return ret; @@ -193,14 +188,17 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt, fflush(stdout); if (remaining <= 0) { - t->done = true; t->result = EVT_TEST_SUCCESS; - rte_smp_wmb(); - break; + if (opt->prod_type == EVT_PROD_TYPE_SYNT) { + t->done = true; + rte_smp_wmb(); + break; + } } } - if (new_cycles - dead_lock_cycles > dead_lock_sample) { + if (new_cycles - dead_lock_cycles > dead_lock_sample && + opt->prod_type == EVT_PROD_TYPE_SYNT) { remaining = t->outstand_pkts - processed_pkts(t); if (dead_lock_remaining == remaining) { 
rte_event_dev_dump(opt->dev_id, stdout); @@ -217,21 +215,83 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt, return 0; } +static int +perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, + struct rte_event_port_conf prod_conf) +{ + int ret = 0; + uint16_t prod; + struct rte_event_eth_rx_adapter_queue_conf queue_conf; + + memset(&queue_conf, 0, + sizeof(struct rte_event_eth_rx_adapter_queue_conf)); + queue_conf.ev.sched_type = opt->sched_type_list[0]; + for (prod = 0; prod < rte_eth_dev_count(); prod++) { + uint32_t cap; + + ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, + prod, &cap); + if (ret) { + evt_err("failed to get event rx adapter[%d]" + " capabilities", + opt->dev_id); + return ret; + } + queue_conf.ev.queue_id = prod * stride; + ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id, + &prod_conf); + if (ret) { + evt_err("failed to create rx adapter[%d]", prod); + return ret; + } + ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1, + &queue_conf); + if (ret) { + evt_err("failed to add rx queues to adapter[%d]", prod); + return ret; + } + + if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) { + uint32_t service_id; + + rte_event_eth_rx_adapter_service_id_get(prod, + &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("Failed to setup service core" + " for Rx adapter\n"); + return ret; + } + } + + ret = rte_eth_dev_start(prod); + if (ret) { + evt_err("Ethernet dev [%d] failed to start." 
+ " Using synthetic producer", prod); + return ret; + } + + ret = rte_event_eth_rx_adapter_start(prod); + if (ret) { + evt_err("Rx adapter[%d] start failed", prod); + return ret; + } + printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__, + prod, prod); + } + + return ret; +} + int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, - uint8_t stride, uint8_t nb_queues) + uint8_t stride, uint8_t nb_queues, + const struct rte_event_port_conf *port_conf) { struct test_perf *t = evt_test_priv(test); - uint8_t port, prod; + uint16_t port, prod; int ret = -1; - /* port configuration */ - const struct rte_event_port_conf wkr_p_conf = { - .dequeue_depth = opt->wkr_deq_dep, - .enqueue_depth = 64, - .new_event_threshold = 4096, - }; - /* setup one port per worker, linking to all queues */ for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) { @@ -243,7 +303,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, w->processed_pkts = 0; w->latency = 0; - ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf); + ret = rte_event_port_setup(opt->dev_id, port, port_conf); if (ret) { evt_err("failed to setup port %d", port); return ret; @@ -257,26 +317,33 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, } /* port for producers, no links */ - const struct rte_event_port_conf prod_conf = { - .dequeue_depth = 8, - .enqueue_depth = 32, - .new_event_threshold = 1200, - }; - prod = 0; - for ( ; port < perf_nb_event_ports(opt); port++) { - struct prod_data *p = &t->prod[port]; - - p->dev_id = opt->dev_id; - p->port_id = port; - p->queue_id = prod * stride; - p->t = t; + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { + for ( ; port < perf_nb_event_ports(opt); port++) { + struct prod_data *p = &t->prod[port]; + p->t = t; + } - ret = rte_event_port_setup(opt->dev_id, port, &prod_conf); - if (ret) { - evt_err("failed to setup port %d", port); + ret = perf_event_rx_adapter_setup(opt, 
stride, *port_conf); + if (ret) return ret; + } else { + prod = 0; + for ( ; port < perf_nb_event_ports(opt); port++) { + struct prod_data *p = &t->prod[port]; + + p->dev_id = opt->dev_id; + p->port_id = port; + p->queue_id = prod * stride; + p->t = t; + + ret = rte_event_port_setup(opt->dev_id, port, + port_conf); + if (ret) { + evt_err("failed to setup port %d", port); + return ret; + } + prod++; } - prod++; } return ret; @@ -287,8 +354,10 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues) { unsigned int lcores; - /* N producer + N worker + 1 master */ - lcores = 3; + /* N producer + N worker + 1 master when producer cores are used + * Else N worker + 1 master when Rx adapter is used + */ + lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2; if (rte_lcore_count() < lcores) { evt_err("test need minimum %d lcores", lcores); @@ -313,18 +382,21 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues) return -1; } - /* Validate producer lcores */ - if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) { - evt_err("producer lcores overlaps with master lcore"); - return -1; - } - if (evt_has_disabled_lcore(opt->plcores)) { - evt_err("one or more producer lcores are not enabled"); - return -1; - } - if (!evt_has_active_lcore(opt->plcores)) { - evt_err("minimum one producer is required"); - return -1; + if (opt->prod_type == EVT_PROD_TYPE_SYNT) { + /* Validate producer lcores */ + if (evt_lcores_has_overlap(opt->plcores, + rte_get_master_lcore())) { + evt_err("producer lcores overlaps with master lcore"); + return -1; + } + if (evt_has_disabled_lcore(opt->plcores)) { + evt_err("one or more producer lcores are not enabled"); + return -1; + } + if (!evt_has_active_lcore(opt->plcores)) { + evt_err("minimum one producer is required"); + return -1; + } } if (evt_has_invalid_stage(opt)) @@ -369,6 +441,7 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues) evt_dump("nb_evdev_queues", "%d", nb_queues); evt_dump_queue_priority(opt); 
evt_dump_sched_type_list(opt); + evt_dump_producer_type(opt); } void @@ -387,18 +460,108 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused, memset(obj, 0, mp->elt_size); } +#define NB_RX_DESC 128 +#define NB_TX_DESC 512 +int +perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) +{ + int i; + struct test_perf *t = evt_test_priv(test); + struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_MQ_RX_RSS, + .max_rx_pkt_len = ETHER_MAX_LEN, + .split_hdr_size = 0, + .header_split = 0, + .hw_ip_checksum = 0, + .hw_vlan_filter = 0, + .hw_vlan_strip = 0, + .hw_vlan_extend = 0, + .jumbo_frame = 0, + .hw_strip_crc = 1, + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IP, + }, + }, + }; + + if (opt->prod_type == EVT_PROD_TYPE_SYNT) + return 0; + + if (!rte_eth_dev_count()) { + evt_err("No ethernet ports found."); + return -ENODEV; + } + + for (i = 0; i < rte_eth_dev_count(); i++) { + + if (rte_eth_dev_configure(i, 1, 1, + &port_conf) + < 0) { + evt_err("Failed to configure eth port [%d]", i); + return -EINVAL; + } + + if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC, + rte_socket_id(), NULL, t->pool) < 0) { + evt_err("Failed to setup eth port [%d] rx_queue: %d.", + i, 0); + return -EINVAL; + } + + if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC, + rte_socket_id(), NULL) < 0) { + evt_err("Failed to setup eth port [%d] tx_queue: %d.", + i, 0); + return -EINVAL; + } + + rte_eth_promiscuous_enable(i); + } + + return 0; +} + +void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt) +{ + int i; + RTE_SET_USED(test); + + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { + for (i = 0; i < rte_eth_dev_count(); i++) { + rte_event_eth_rx_adapter_stop(i); + rte_eth_dev_stop(i); + rte_eth_dev_close(i); + } + } +} + int perf_mempool_setup(struct evt_test *test, struct evt_options *opt) { struct test_perf *t = evt_test_priv(test); - t->pool = rte_mempool_create(test->name, /* mempool name */ + if (opt->prod_type == 
EVT_PROD_TYPE_SYNT) { + t->pool = rte_mempool_create(test->name, /* mempool name */ opt->pool_sz, /* number of elements*/ sizeof(struct perf_elt), /* element size*/ 512, /* cache size*/ 0, NULL, NULL, perf_elt_init, /* obj constructor */ NULL, opt->socket_id, 0); /* flags */ + } else { + t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */ + opt->pool_sz, /* number of elements*/ + 512, /* cache size*/ + 0, + RTE_MBUF_DEFAULT_BUF_SIZE, + opt->socket_id); /* flags */ + + } + if (t->pool == NULL) { evt_err("failed to create mempool"); return -ENOMEM; diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h index c6fc70cd..9ad99733 100644 --- a/app/test-eventdev/test_perf_common.h +++ b/app/test-eventdev/test_perf_common.h @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #ifndef _TEST_PERF_COMMON_ @@ -38,7 +10,9 @@ #include <unistd.h> #include <rte_cycles.h> +#include <rte_ethdev.h> #include <rte_eventdev.h> +#include <rte_event_eth_rx_adapter.h> #include <rte_lcore.h> #include <rte_malloc.h> #include <rte_mempool.h> @@ -156,15 +130,18 @@ perf_nb_event_ports(struct evt_options *opt) int perf_test_result(struct evt_test *test, struct evt_options *opt); int perf_opt_check(struct evt_options *opt, uint64_t nb_queues); int perf_test_setup(struct evt_test *test, struct evt_options *opt); +int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt); int perf_mempool_setup(struct evt_test *test, struct evt_options *opt); int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, - uint8_t stride, uint8_t nb_queues); + uint8_t stride, uint8_t nb_queues, + const struct rte_event_port_conf *port_conf); int perf_event_dev_service_setup(uint8_t dev_id); int perf_launch_lcores(struct evt_test *test, struct evt_options *opt, int (*worker)(void *)); void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues); void perf_test_destroy(struct evt_test *test, struct evt_options *opt); void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt); +void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt); void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt); 
#endif /* _TEST_PERF_COMMON_ */ diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c index d843eea1..db8f2f3e 100644 --- a/app/test-eventdev/test_perf_queue.c +++ b/app/test-eventdev/test_perf_queue.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (C) Cavium, Inc 2017. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Cavium, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc */ #include "test_perf_common.h" @@ -38,7 +10,9 @@ static inline int perf_queue_nb_event_queues(struct evt_options *opt) { /* nb_queues = number of producers * number of stages */ - return evt_nr_active_lcores(opt->plcores) * opt->nb_stages; + uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? + rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores); + return nb_prod * opt->nb_stages; } static inline __attribute__((always_inline)) void @@ -182,14 +156,32 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) uint8_t queue; int nb_stages = opt->nb_stages; int ret; + int nb_ports; + int nb_queues; + struct rte_event_dev_info dev_info; + + nb_ports = evt_nr_active_lcores(opt->wlcores); + nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 : + evt_nr_active_lcores(opt->plcores); + + nb_queues = perf_queue_nb_event_queues(opt); + + memset(&dev_info, 0, sizeof(struct rte_event_dev_info)); + ret = rte_event_dev_info_get(opt->dev_id, &dev_info); + if (ret) { + evt_err("failed to get eventdev info %d", opt->dev_id); + return ret; + } const struct rte_event_dev_config config = { - .nb_event_queues = perf_queue_nb_event_queues(opt), - .nb_event_ports = perf_nb_event_ports(opt), - .nb_events_limit = 4096, + .nb_event_queues = nb_queues, + .nb_event_ports = nb_ports, + .nb_events_limit = dev_info.max_num_events, .nb_event_queue_flows = opt->nb_flows, - .nb_event_port_dequeue_depth = 128, - .nb_event_port_enqueue_depth = 128, + .nb_event_port_dequeue_depth = + dev_info.max_event_port_dequeue_depth, + .nb_event_port_enqueue_depth = + dev_info.max_event_port_enqueue_depth, }; ret = rte_event_dev_configure(opt->dev_id, &config); @@ -204,7 +196,7 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) .nb_atomic_order_sequences = opt->nb_flows, }; /* queue configurations */ - for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) 
{ + for (queue = 0; queue < nb_queues; queue++) { q_conf.schedule_type = (opt->sched_type_list[queue % nb_stages]); @@ -227,15 +219,29 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) } } + if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth) + opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth; + + /* port configuration */ + const struct rte_event_port_conf p_conf = { + .dequeue_depth = opt->wkr_deq_dep, + .enqueue_depth = dev_info.max_event_port_dequeue_depth, + .new_event_threshold = dev_info.max_num_events, + }; + ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */, - perf_queue_nb_event_queues(opt)); + nb_queues, &p_conf); if (ret) return ret; - ret = evt_service_setup(opt->dev_id); - if (ret) { - evt_err("No service lcore found to run event dev."); - return ret; + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } } ret = rte_event_dev_start(opt->dev_id); @@ -283,10 +289,12 @@ static const struct evt_test_ops perf_queue = { .opt_dump = perf_queue_opt_dump, .test_setup = perf_test_setup, .mempool_setup = perf_mempool_setup, + .ethdev_setup = perf_ethdev_setup, .eventdev_setup = perf_queue_eventdev_setup, .launch_lcores = perf_queue_launch_lcores, .eventdev_destroy = perf_eventdev_destroy, .mempool_destroy = perf_mempool_destroy, + .ethdev_destroy = perf_ethdev_destroy, .test_result = perf_test_result, .test_destroy = perf_test_destroy, }; diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c new file mode 100644 index 00000000..dd718977 --- /dev/null +++ b/app/test-eventdev/test_pipeline_atq.c @@ -0,0 +1,504 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 Cavium, Inc. 
+ */ + +#include "test_pipeline_common.h" + +/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */ + +static __rte_always_inline int +pipeline_atq_nb_event_queues(struct evt_options *opt) +{ + RTE_SET_USED(opt); + + return rte_eth_dev_count(); +} + +static int +pipeline_atq_worker_single_stage_tx(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_INIT; + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { + pipeline_tx_pkt(ev.mbuf); + w->processed_pkts++; + continue; + } + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + pipeline_event_enqueue(dev, port, &ev); + } + + return 0; +} + +static int +pipeline_atq_worker_single_stage_fwd(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_INIT; + const uint8_t tx_queue = t->tx_service.queue_id; + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + w->processed_pkts++; + ev.queue_id = tx_queue; + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + pipeline_event_enqueue(dev, port, &ev); + } + + return 0; +} + +static int +pipeline_atq_worker_single_stage_burst_tx(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT; + + while (t->done == false) { + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) { + + pipeline_tx_pkt(ev[i].mbuf); + ev[i].op = RTE_EVENT_OP_RELEASE; + w->processed_pkts++; + } else + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +} + +static int +pipeline_atq_worker_single_stage_burst_fwd(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT; + const uint8_t tx_queue = 
t->tx_service.queue_id; + + while (t->done == false) { + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + ev[i].queue_id = tx_queue; + pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC); + w->processed_pkts++; + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +} + +static int +pipeline_atq_worker_multi_stage_tx(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_INIT; + const uint8_t nb_stages = t->opt->nb_stages; + + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + cq_id = ev.sub_event_type % nb_stages; + + if (cq_id == last_queue) { + if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { + + pipeline_tx_pkt(ev.mbuf); + w->processed_pkts++; + continue; + } + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + } else { + ev.sub_event_type++; + pipeline_fwd_event(&ev, sched_type_list[cq_id]); + } + + pipeline_event_enqueue(dev, port, &ev); + } + return 0; +} + +static int +pipeline_atq_worker_multi_stage_fwd(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_INIT; + const uint8_t nb_stages = t->opt->nb_stages; + const uint8_t tx_queue = t->tx_service.queue_id; + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + cq_id = ev.sub_event_type % nb_stages; + + if (cq_id == last_queue) { + w->processed_pkts++; + ev.queue_id = tx_queue; + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + } else { + ev.sub_event_type++; + pipeline_fwd_event(&ev, sched_type_list[cq_id]); + } + + pipeline_event_enqueue(dev, port, &ev); + } + return 0; +} + +static int +pipeline_atq_worker_multi_stage_burst_tx(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_BURST_INIT; + const uint8_t nb_stages = t->opt->nb_stages; + + while (t->done == false) { + uint16_t nb_rx = 
rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + cq_id = ev[i].sub_event_type % nb_stages; + + if (cq_id == last_queue) { + if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) { + + pipeline_tx_pkt(ev[i].mbuf); + ev[i].op = RTE_EVENT_OP_RELEASE; + w->processed_pkts++; + continue; + } + + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); + } else { + ev[i].sub_event_type++; + pipeline_fwd_event(&ev[i], + sched_type_list[cq_id]); + } + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + return 0; +} + +static int +pipeline_atq_worker_multi_stage_burst_fwd(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_BURST_INIT; + const uint8_t nb_stages = t->opt->nb_stages; + const uint8_t tx_queue = t->tx_service.queue_id; + + while (t->done == false) { + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + cq_id = ev[i].sub_event_type % nb_stages; + + if (cq_id == last_queue) { + w->processed_pkts++; + ev[i].queue_id = tx_queue; + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); + } else { + ev[i].sub_event_type++; + pipeline_fwd_event(&ev[i], + sched_type_list[cq_id]); + } + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + return 0; +} + +static int +worker_wrapper(void *arg) +{ + struct worker_data *w = arg; + struct evt_options *opt = w->t->opt; + const bool burst = evt_has_burst_mode(w->dev_id); + const bool mt_safe = !w->t->mt_unsafe; + const uint8_t nb_stages = opt->nb_stages; + RTE_SET_USED(opt); + + if (nb_stages == 1) { + if (!burst && mt_safe) + return pipeline_atq_worker_single_stage_tx(arg); + else if (!burst && !mt_safe) + return pipeline_atq_worker_single_stage_fwd(arg); + else if (burst && mt_safe) + return pipeline_atq_worker_single_stage_burst_tx(arg); + else if (burst && 
!mt_safe) + return pipeline_atq_worker_single_stage_burst_fwd(arg); + } else { + if (!burst && mt_safe) + return pipeline_atq_worker_multi_stage_tx(arg); + else if (!burst && !mt_safe) + return pipeline_atq_worker_multi_stage_fwd(arg); + if (burst && mt_safe) + return pipeline_atq_worker_multi_stage_burst_tx(arg); + else if (burst && !mt_safe) + return pipeline_atq_worker_multi_stage_burst_fwd(arg); + } + rte_panic("invalid worker\n"); +} + +static int +pipeline_atq_launch_lcores(struct evt_test *test, struct evt_options *opt) +{ + struct test_pipeline *t = evt_test_priv(test); + + if (t->mt_unsafe) + rte_service_component_runstate_set(t->tx_service.service_id, 1); + return pipeline_launch_lcores(test, opt, worker_wrapper); +} + +static int +pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) +{ + int ret; + int nb_ports; + int nb_queues; + uint8_t queue; + struct rte_event_dev_info info; + struct test_pipeline *t = evt_test_priv(test); + uint8_t tx_evqueue_id = 0; + uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV]; + uint8_t nb_worker_queues = 0; + + nb_ports = evt_nr_active_lcores(opt->wlcores); + nb_queues = rte_eth_dev_count(); + + /* One extra port and queueu for Tx service */ + if (t->mt_unsafe) { + tx_evqueue_id = nb_queues; + nb_ports++; + nb_queues++; + } + + + rte_event_dev_info_get(opt->dev_id, &info); + + const struct rte_event_dev_config config = { + .nb_event_queues = nb_queues, + .nb_event_ports = nb_ports, + .nb_events_limit = info.max_num_events, + .nb_event_queue_flows = opt->nb_flows, + .nb_event_port_dequeue_depth = + info.max_event_port_dequeue_depth, + .nb_event_port_enqueue_depth = + info.max_event_port_enqueue_depth, + }; + ret = rte_event_dev_configure(opt->dev_id, &config); + if (ret) { + evt_err("failed to configure eventdev %d", opt->dev_id); + return ret; + } + + struct rte_event_queue_conf q_conf = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .nb_atomic_flows = opt->nb_flows, + .nb_atomic_order_sequences = 
opt->nb_flows, + }; + /* queue configurations */ + for (queue = 0; queue < nb_queues; queue++) { + q_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES; + + if (t->mt_unsafe) { + if (queue == tx_evqueue_id) { + q_conf.event_queue_cfg = + RTE_EVENT_QUEUE_CFG_SINGLE_LINK; + } else { + queue_arr[nb_worker_queues] = queue; + nb_worker_queues++; + } + } + + ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf); + if (ret) { + evt_err("failed to setup queue=%d", queue); + return ret; + } + } + + if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth) + opt->wkr_deq_dep = info.max_event_port_dequeue_depth; + + /* port configuration */ + const struct rte_event_port_conf p_conf = { + .dequeue_depth = opt->wkr_deq_dep, + .enqueue_depth = info.max_event_port_dequeue_depth, + .new_event_threshold = info.max_num_events, + }; + + if (t->mt_unsafe) { + ret = pipeline_event_port_setup(test, opt, queue_arr, + nb_worker_queues, p_conf); + if (ret) + return ret; + + ret = pipeline_event_tx_service_setup(test, opt, tx_evqueue_id, + nb_ports - 1, p_conf); + } else + ret = pipeline_event_port_setup(test, opt, NULL, nb_queues, + p_conf); + + if (ret) + return ret; + + /* + * The pipelines are setup in the following manner: + * + * eth_dev_count = 2, nb_stages = 2, atq mode + * + * Multi thread safe : + * queues = 2 + * stride = 1 + * + * event queue pipelines: + * eth0 -> q0 ->tx + * eth1 -> q1 ->tx + * + * q0, q1 are configured as ATQ so, all the different stages can + * be enqueued on the same queue. + * + * Multi thread unsafe : + * queues = 3 + * stride = 1 + * + * event queue pipelines: + * eth0 -> q0 + * } (q3->tx) Tx service + * eth1 -> q1 + * + * q0,q1 are configured as stated above. + * q3 configured as SINGLE_LINK|ATOMIC. 
+ */ + ret = pipeline_event_rx_adapter_setup(opt, 1, p_conf); + if (ret) + return ret; + + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } + } + + ret = rte_event_dev_start(opt->dev_id); + if (ret) { + evt_err("failed to start eventdev %d", opt->dev_id); + return ret; + } + + return 0; +} + +static void +pipeline_atq_opt_dump(struct evt_options *opt) +{ + pipeline_opt_dump(opt, pipeline_atq_nb_event_queues(opt)); +} + +static int +pipeline_atq_opt_check(struct evt_options *opt) +{ + return pipeline_opt_check(opt, pipeline_atq_nb_event_queues(opt)); +} + +static bool +pipeline_atq_capability_check(struct evt_options *opt) +{ + struct rte_event_dev_info dev_info; + + rte_event_dev_info_get(opt->dev_id, &dev_info); + if (dev_info.max_event_queues < pipeline_atq_nb_event_queues(opt) || + dev_info.max_event_ports < + evt_nr_active_lcores(opt->wlcores)) { + evt_err("not enough eventdev queues=%d/%d or ports=%d/%d", + pipeline_atq_nb_event_queues(opt), + dev_info.max_event_queues, + evt_nr_active_lcores(opt->wlcores), + dev_info.max_event_ports); + } + + return true; +} + +static const struct evt_test_ops pipeline_atq = { + .cap_check = pipeline_atq_capability_check, + .opt_check = pipeline_atq_opt_check, + .opt_dump = pipeline_atq_opt_dump, + .test_setup = pipeline_test_setup, + .mempool_setup = pipeline_mempool_setup, + .ethdev_setup = pipeline_ethdev_setup, + .eventdev_setup = pipeline_atq_eventdev_setup, + .launch_lcores = pipeline_atq_launch_lcores, + .eventdev_destroy = pipeline_eventdev_destroy, + .mempool_destroy = pipeline_mempool_destroy, + .ethdev_destroy = pipeline_ethdev_destroy, + .test_result = pipeline_test_result, + .test_destroy = pipeline_test_destroy, +}; + +EVT_TEST_REGISTER(pipeline_atq); diff --git a/app/test-eventdev/test_pipeline_common.c 
b/app/test-eventdev/test_pipeline_common.c new file mode 100644 index 00000000..6cad9357 --- /dev/null +++ b/app/test-eventdev/test_pipeline_common.c @@ -0,0 +1,548 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 Cavium, Inc. + */ + +#include "test_pipeline_common.h" + +static int32_t +pipeline_event_tx_burst_service_func(void *args) +{ + + int i; + struct tx_service_data *tx = args; + const uint8_t dev = tx->dev_id; + const uint8_t port = tx->port_id; + struct rte_event ev[BURST_SIZE + 1]; + + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0); + + if (!nb_rx) { + for (i = 0; i < tx->nb_ethports; i++) + rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]); + return 0; + } + + for (i = 0; i < nb_rx; i++) { + struct rte_mbuf *m = ev[i].mbuf; + rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m); + } + tx->processed_pkts += nb_rx; + + return 0; +} + +static int32_t +pipeline_event_tx_service_func(void *args) +{ + + int i; + struct tx_service_data *tx = args; + const uint8_t dev = tx->dev_id; + const uint8_t port = tx->port_id; + struct rte_event ev; + + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!nb_rx) { + for (i = 0; i < tx->nb_ethports; i++) + rte_eth_tx_buffer_flush(i, 0, tx->tx_buf[i]); + return 0; + } + + struct rte_mbuf *m = ev.mbuf; + rte_eth_tx_buffer(m->port, 0, tx->tx_buf[m->port], m); + tx->processed_pkts++; + + return 0; +} + +int +pipeline_test_result(struct evt_test *test, struct evt_options *opt) +{ + RTE_SET_USED(opt); + int i; + uint64_t total = 0; + struct test_pipeline *t = evt_test_priv(test); + + printf("Packet distribution across worker cores :\n"); + for (i = 0; i < t->nb_workers; i++) + total += t->worker[i].processed_pkts; + for (i = 0; i < t->nb_workers; i++) + printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:" + CLGRN" %3.2f\n"CLNRM, i, + t->worker[i].processed_pkts, + (((double)t->worker[i].processed_pkts)/total) + * 100); + return t->result; +} + +void 
+pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues) +{ + evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores)); + evt_dump_worker_lcores(opt); + evt_dump_nb_stages(opt); + evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt)); + evt_dump("nb_evdev_queues", "%d", nb_queues); + evt_dump_queue_priority(opt); + evt_dump_sched_type_list(opt); + evt_dump_producer_type(opt); +} + +static inline uint64_t +processed_pkts(struct test_pipeline *t) +{ + uint8_t i; + uint64_t total = 0; + + rte_smp_rmb(); + if (t->mt_unsafe) + total = t->tx_service.processed_pkts; + else + for (i = 0; i < t->nb_workers; i++) + total += t->worker[i].processed_pkts; + + return total; +} + +int +pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt, + int (*worker)(void *)) +{ + int ret, lcore_id; + struct test_pipeline *t = evt_test_priv(test); + + int port_idx = 0; + /* launch workers */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (!(opt->wlcores[lcore_id])) + continue; + + ret = rte_eal_remote_launch(worker, + &t->worker[port_idx], lcore_id); + if (ret) { + evt_err("failed to launch worker %d", lcore_id); + return ret; + } + port_idx++; + } + + uint64_t perf_cycles = rte_get_timer_cycles(); + const uint64_t perf_sample = rte_get_timer_hz(); + + static float total_mpps; + static uint64_t samples; + + uint64_t prev_pkts = 0; + + while (t->done == false) { + const uint64_t new_cycles = rte_get_timer_cycles(); + + if ((new_cycles - perf_cycles) > perf_sample) { + const uint64_t curr_pkts = processed_pkts(t); + + float mpps = (float)(curr_pkts - prev_pkts)/1000000; + + prev_pkts = curr_pkts; + perf_cycles = new_cycles; + total_mpps += mpps; + ++samples; + printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM, + mpps, total_mpps/samples); + fflush(stdout); + } + } + printf("\n"); + return 0; +} + +int +pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) +{ + unsigned int lcores; + /* + * N worker + 1 master + */ + lcores = 2; + + if 
(!rte_eth_dev_count()) { + evt_err("test needs minimum 1 ethernet dev"); + return -1; + } + + if (rte_lcore_count() < lcores) { + evt_err("test need minimum %d lcores", lcores); + return -1; + } + + /* Validate worker lcores */ + if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) { + evt_err("worker lcores overlaps with master lcore"); + return -1; + } + if (evt_has_disabled_lcore(opt->wlcores)) { + evt_err("one or more workers lcores are not enabled"); + return -1; + } + if (!evt_has_active_lcore(opt->wlcores)) { + evt_err("minimum one worker is required"); + return -1; + } + + if (nb_queues > EVT_MAX_QUEUES) { + evt_err("number of queues exceeds %d", EVT_MAX_QUEUES); + return -1; + } + if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) { + evt_err("number of ports exceeds %d", EVT_MAX_PORTS); + return -1; + } + + if (evt_has_invalid_stage(opt)) + return -1; + + if (evt_has_invalid_sched_type(opt)) + return -1; + + return 0; +} + +#define NB_RX_DESC 128 +#define NB_TX_DESC 512 +int +pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) +{ + int i; + uint8_t nb_queues = 1; + uint8_t mt_state = 0; + struct test_pipeline *t = evt_test_priv(test); + struct rte_eth_rxconf rx_conf; + struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_MQ_RX_RSS, + .max_rx_pkt_len = ETHER_MAX_LEN, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, + .ignore_offload_bitfield = 1, + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IP, + }, + }, + }; + + RTE_SET_USED(opt); + if (!rte_eth_dev_count()) { + evt_err("No ethernet ports found.\n"); + return -ENODEV; + } + + for (i = 0; i < rte_eth_dev_count(); i++) { + struct rte_eth_dev_info dev_info; + + memset(&dev_info, 0, sizeof(struct rte_eth_dev_info)); + rte_eth_dev_info_get(i, &dev_info); + mt_state = !(dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_MT_LOCKFREE); + rx_conf = dev_info.default_rxconf; + rx_conf.offloads = port_conf.rxmode.offloads; + + if (rte_eth_dev_configure(i, 
nb_queues, nb_queues, + &port_conf) + < 0) { + evt_err("Failed to configure eth port [%d]\n", i); + return -EINVAL; + } + + if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC, + rte_socket_id(), &rx_conf, t->pool) < 0) { + evt_err("Failed to setup eth port [%d] rx_queue: %d.\n", + i, 0); + return -EINVAL; + } + if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC, + rte_socket_id(), NULL) < 0) { + evt_err("Failed to setup eth port [%d] tx_queue: %d.\n", + i, 0); + return -EINVAL; + } + + t->mt_unsafe |= mt_state; + t->tx_service.tx_buf[i] = + rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0); + if (t->tx_service.tx_buf[i] == NULL) + rte_panic("Unable to allocate Tx buffer memory."); + rte_eth_promiscuous_enable(i); + } + + return 0; +} + +int +pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt, + uint8_t *queue_arr, uint8_t nb_queues, + const struct rte_event_port_conf p_conf) +{ + int i; + int ret; + uint8_t port; + struct test_pipeline *t = evt_test_priv(test); + + + /* setup one port per worker, linking to all queues */ + for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) { + struct worker_data *w = &t->worker[port]; + + w->dev_id = opt->dev_id; + w->port_id = port; + w->t = t; + w->processed_pkts = 0; + + ret = rte_event_port_setup(opt->dev_id, port, &p_conf); + if (ret) { + evt_err("failed to setup port %d", port); + return ret; + } + + if (queue_arr == NULL) { + if (rte_event_port_link(opt->dev_id, port, NULL, NULL, + 0) != nb_queues) + goto link_fail; + } else { + for (i = 0; i < nb_queues; i++) { + if (rte_event_port_link(opt->dev_id, port, + &queue_arr[i], NULL, 1) != 1) + goto link_fail; + } + } + } + + return 0; + +link_fail: + evt_err("failed to link all queues to port %d", port); + return -EINVAL; +} + +int +pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, + struct rte_event_port_conf prod_conf) +{ + int ret = 0; + uint16_t prod; + struct rte_event_eth_rx_adapter_queue_conf queue_conf; + + 
memset(&queue_conf, 0, + sizeof(struct rte_event_eth_rx_adapter_queue_conf)); + queue_conf.ev.sched_type = opt->sched_type_list[0]; + for (prod = 0; prod < rte_eth_dev_count(); prod++) { + uint32_t cap; + + ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, + prod, &cap); + if (ret) { + evt_err("failed to get event rx adapter[%d]" + " capabilities", + opt->dev_id); + return ret; + } + queue_conf.ev.queue_id = prod * stride; + ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id, + &prod_conf); + if (ret) { + evt_err("failed to create rx adapter[%d]", prod); + return ret; + } + ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1, + &queue_conf); + if (ret) { + evt_err("failed to add rx queues to adapter[%d]", prod); + return ret; + } + + if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) { + uint32_t service_id; + + rte_event_eth_rx_adapter_service_id_get(prod, + &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("Failed to setup service core" + " for Rx adapter\n"); + return ret; + } + } + + ret = rte_eth_dev_start(prod); + if (ret) { + evt_err("Ethernet dev [%d] failed to start." 
+ " Using synthetic producer", prod); + return ret; + } + + ret = rte_event_eth_rx_adapter_start(prod); + if (ret) { + evt_err("Rx adapter[%d] start failed", prod); + return ret; + } + printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__, + prod, prod); + } + + return ret; +} + +int +pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt, + uint8_t tx_queue_id, uint8_t tx_port_id, + const struct rte_event_port_conf p_conf) +{ + int ret; + struct rte_service_spec serv; + struct test_pipeline *t = evt_test_priv(test); + struct tx_service_data *tx = &t->tx_service; + + ret = rte_event_port_setup(opt->dev_id, tx_port_id, &p_conf); + if (ret) { + evt_err("failed to setup port %d", tx_port_id); + return ret; + } + + if (rte_event_port_link(opt->dev_id, tx_port_id, &tx_queue_id, + NULL, 1) != 1) { + evt_err("failed to link queues to port %d", tx_port_id); + return -EINVAL; + } + + tx->dev_id = opt->dev_id; + tx->queue_id = tx_queue_id; + tx->port_id = tx_port_id; + tx->nb_ethports = rte_eth_dev_count(); + tx->t = t; + + /* Register Tx service */ + memset(&serv, 0, sizeof(struct rte_service_spec)); + snprintf(serv.name, sizeof(serv.name), "Tx_service"); + + if (evt_has_burst_mode(opt->dev_id)) + serv.callback = pipeline_event_tx_burst_service_func; + else + serv.callback = pipeline_event_tx_service_func; + + serv.callback_userdata = (void *)tx; + ret = rte_service_component_register(&serv, &tx->service_id); + if (ret) { + evt_err("failed to register Tx service"); + return ret; + } + + ret = evt_service_setup(tx->service_id); + if (ret) { + evt_err("Failed to setup service core for Tx service\n"); + return ret; + } + + rte_service_runstate_set(tx->service_id, 1); + + return 0; +} + + +void +pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt) +{ + int i; + RTE_SET_USED(test); + RTE_SET_USED(opt); + struct test_pipeline *t = evt_test_priv(test); + + if (t->mt_unsafe) { + 
rte_service_component_runstate_set(t->tx_service.service_id, 0); + rte_service_runstate_set(t->tx_service.service_id, 0); + rte_service_component_unregister(t->tx_service.service_id); + } + + for (i = 0; i < rte_eth_dev_count(); i++) { + rte_event_eth_rx_adapter_stop(i); + rte_eth_dev_stop(i); + rte_eth_dev_close(i); + } +} + +void +pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt) +{ + RTE_SET_USED(test); + + rte_event_dev_stop(opt->dev_id); + rte_event_dev_close(opt->dev_id); +} + +int +pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt) +{ + struct test_pipeline *t = evt_test_priv(test); + + t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */ + opt->pool_sz, /* number of elements*/ + 512, /* cache size*/ + 0, + RTE_MBUF_DEFAULT_BUF_SIZE, + opt->socket_id); /* flags */ + + if (t->pool == NULL) { + evt_err("failed to create mempool"); + return -ENOMEM; + } + + return 0; +} + +void +pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt) +{ + RTE_SET_USED(opt); + struct test_pipeline *t = evt_test_priv(test); + + rte_mempool_free(t->pool); +} + +int +pipeline_test_setup(struct evt_test *test, struct evt_options *opt) +{ + void *test_pipeline; + + test_pipeline = rte_zmalloc_socket(test->name, + sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE, + opt->socket_id); + if (test_pipeline == NULL) { + evt_err("failed to allocate test_pipeline memory"); + goto nomem; + } + test->test_priv = test_pipeline; + + struct test_pipeline *t = evt_test_priv(test); + + t->nb_workers = evt_nr_active_lcores(opt->wlcores); + t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores); + t->done = false; + t->nb_flows = opt->nb_flows; + t->result = EVT_TEST_FAILED; + t->opt = opt; + opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR; + memcpy(t->sched_type_list, opt->sched_type_list, + sizeof(opt->sched_type_list)); + return 0; +nomem: + return -ENOMEM; +} + +void +pipeline_test_destroy(struct evt_test *test, 
struct evt_options *opt) +{ + RTE_SET_USED(opt); + + rte_free(test->test_priv); +} diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h new file mode 100644 index 00000000..5fb91607 --- /dev/null +++ b/app/test-eventdev/test_pipeline_common.h @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 Cavium, Inc. + */ + +#ifndef _TEST_PIPELINE_COMMON_ +#define _TEST_PIPELINE_COMMON_ + +#include <stdio.h> +#include <stdbool.h> +#include <unistd.h> + +#include <rte_cycles.h> +#include <rte_ethdev.h> +#include <rte_eventdev.h> +#include <rte_event_eth_rx_adapter.h> +#include <rte_lcore.h> +#include <rte_malloc.h> +#include <rte_mempool.h> +#include <rte_prefetch.h> +#include <rte_spinlock.h> +#include <rte_service.h> +#include <rte_service_component.h> + +#include "evt_common.h" +#include "evt_options.h" +#include "evt_test.h" + +struct test_pipeline; + +struct worker_data { + uint64_t processed_pkts; + uint8_t dev_id; + uint8_t port_id; + struct test_pipeline *t; +} __rte_cache_aligned; + +struct tx_service_data { + uint8_t dev_id; + uint8_t queue_id; + uint8_t port_id; + uint32_t service_id; + uint64_t processed_pkts; + uint16_t nb_ethports; + struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS]; + struct test_pipeline *t; +} __rte_cache_aligned; + +struct test_pipeline { + /* Don't change the offset of "done". Signal handler use this memory + * to terminate all lcores work. 
+ */ + int done; + uint8_t nb_workers; + uint8_t mt_unsafe; + enum evt_test_result result; + uint32_t nb_flows; + uint64_t outstand_pkts; + struct rte_mempool *pool; + struct worker_data worker[EVT_MAX_PORTS]; + struct tx_service_data tx_service; + struct evt_options *opt; + uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned; +} __rte_cache_aligned; + +#define BURST_SIZE 16 + +#define PIPELINE_WROKER_SINGLE_STAGE_INIT \ + struct worker_data *w = arg; \ + struct test_pipeline *t = w->t; \ + const uint8_t dev = w->dev_id; \ + const uint8_t port = w->port_id; \ + struct rte_event ev + +#define PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT \ + int i; \ + struct worker_data *w = arg; \ + struct test_pipeline *t = w->t; \ + const uint8_t dev = w->dev_id; \ + const uint8_t port = w->port_id; \ + struct rte_event ev[BURST_SIZE + 1] + +#define PIPELINE_WROKER_MULTI_STAGE_INIT \ + struct worker_data *w = arg; \ + struct test_pipeline *t = w->t; \ + uint8_t cq_id; \ + const uint8_t dev = w->dev_id; \ + const uint8_t port = w->port_id; \ + const uint8_t last_queue = t->opt->nb_stages - 1; \ + uint8_t *const sched_type_list = &t->sched_type_list[0]; \ + struct rte_event ev + +#define PIPELINE_WROKER_MULTI_STAGE_BURST_INIT \ + int i; \ + struct worker_data *w = arg; \ + struct test_pipeline *t = w->t; \ + uint8_t cq_id; \ + const uint8_t dev = w->dev_id; \ + const uint8_t port = w->port_id; \ + const uint8_t last_queue = t->opt->nb_stages - 1; \ + uint8_t *const sched_type_list = &t->sched_type_list[0]; \ + struct rte_event ev[BURST_SIZE + 1] + +static __rte_always_inline void +pipeline_fwd_event(struct rte_event *ev, uint8_t sched) +{ + ev->event_type = RTE_EVENT_TYPE_CPU; + ev->op = RTE_EVENT_OP_FORWARD; + ev->sched_type = sched; +} + +static __rte_always_inline void +pipeline_event_enqueue(const uint8_t dev, const uint8_t port, + struct rte_event *ev) +{ + while (rte_event_enqueue_burst(dev, port, ev, 1) != 1) + rte_pause(); +} + +static __rte_always_inline void 
+pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port, + struct rte_event *ev, const uint16_t nb_rx) +{ + uint16_t enq; + + enq = rte_event_enqueue_burst(dev, port, ev, nb_rx); + while (enq < nb_rx) { + enq += rte_event_enqueue_burst(dev, port, + ev + enq, nb_rx - enq); + } +} + +static __rte_always_inline void +pipeline_tx_pkt(struct rte_mbuf *mbuf) +{ + while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1) + rte_pause(); +} + +static inline int +pipeline_nb_event_ports(struct evt_options *opt) +{ + return evt_nr_active_lcores(opt->wlcores); +} + +int pipeline_test_result(struct evt_test *test, struct evt_options *opt); +int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues); +int pipeline_test_setup(struct evt_test *test, struct evt_options *opt); +int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt); +int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, + struct rte_event_port_conf prod_conf); +int pipeline_event_tx_service_setup(struct evt_test *test, + struct evt_options *opt, uint8_t tx_queue_id, + uint8_t tx_port_id, const struct rte_event_port_conf p_conf); +int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt); +int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt, + uint8_t *queue_arr, uint8_t nb_queues, + const struct rte_event_port_conf p_conf); +int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt, + int (*worker)(void *)); +void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues); +void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt); +void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt); +void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt); +void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt); + +#endif /* _TEST_PIPELINE_COMMON_ */ diff --git a/app/test-eventdev/test_pipeline_queue.c 
b/app/test-eventdev/test_pipeline_queue.c new file mode 100644 index 00000000..02fc27cf --- /dev/null +++ b/app/test-eventdev/test_pipeline_queue.c @@ -0,0 +1,526 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 Cavium, Inc. + */ + +#include "test_pipeline_common.h" + +/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */ + +static __rte_always_inline int +pipeline_queue_nb_event_queues(struct evt_options *opt) +{ + uint16_t eth_count = rte_eth_dev_count(); + + return (eth_count * opt->nb_stages) + eth_count; +} + +static int +pipeline_queue_worker_single_stage_tx(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_INIT; + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { + pipeline_tx_pkt(ev.mbuf); + w->processed_pkts++; + } else { + ev.queue_id++; + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + pipeline_event_enqueue(dev, port, &ev); + } + } + + return 0; +} + +static int +pipeline_queue_worker_single_stage_fwd(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_INIT; + const uint8_t tx_queue = t->tx_service.queue_id; + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + ev.queue_id = tx_queue; + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + pipeline_event_enqueue(dev, port, &ev); + w->processed_pkts++; + } + + return 0; +} + +static int +pipeline_queue_worker_single_stage_burst_tx(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT; + + while (t->done == false) { + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) { + + pipeline_tx_pkt(ev[i].mbuf); + ev[i].op = RTE_EVENT_OP_RELEASE; + 
w->processed_pkts++; + } else { + ev[i].queue_id++; + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); + } + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +} + +static int +pipeline_queue_worker_single_stage_burst_fwd(void *arg) +{ + PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT; + const uint8_t tx_queue = t->tx_service.queue_id; + + while (t->done == false) { + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + ev[i].queue_id = tx_queue; + pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC); + w->processed_pkts++; + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + + return 0; +} + + +static int +pipeline_queue_worker_multi_stage_tx(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_INIT; + const uint8_t nb_stages = t->opt->nb_stages + 1; + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + cq_id = ev.queue_id % nb_stages; + + if (cq_id >= last_queue) { + if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { + + pipeline_tx_pkt(ev.mbuf); + w->processed_pkts++; + continue; + } + ev.queue_id += (cq_id == last_queue) ? 
1 : 0; + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + } else { + ev.queue_id++; + pipeline_fwd_event(&ev, sched_type_list[cq_id]); + } + + pipeline_event_enqueue(dev, port, &ev); + } + return 0; +} + +static int +pipeline_queue_worker_multi_stage_fwd(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_INIT; + const uint8_t nb_stages = t->opt->nb_stages + 1; + const uint8_t tx_queue = t->tx_service.queue_id; + + while (t->done == false) { + uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0); + + if (!event) { + rte_pause(); + continue; + } + + cq_id = ev.queue_id % nb_stages; + + if (cq_id == last_queue) { + ev.queue_id = tx_queue; + pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC); + w->processed_pkts++; + } else { + ev.queue_id++; + pipeline_fwd_event(&ev, sched_type_list[cq_id]); + } + + pipeline_event_enqueue(dev, port, &ev); + } + return 0; +} + +static int +pipeline_queue_worker_multi_stage_burst_tx(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_BURST_INIT; + const uint8_t nb_stages = t->opt->nb_stages + 1; + + while (t->done == false) { + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + cq_id = ev[i].queue_id % nb_stages; + + if (cq_id >= last_queue) { + if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) { + + pipeline_tx_pkt(ev[i].mbuf); + ev[i].op = RTE_EVENT_OP_RELEASE; + w->processed_pkts++; + continue; + } + + ev[i].queue_id += (cq_id == last_queue) ? 
1 : 0; + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); + } else { + ev[i].queue_id++; + pipeline_fwd_event(&ev[i], + sched_type_list[cq_id]); + } + + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + return 0; +} + +static int +pipeline_queue_worker_multi_stage_burst_fwd(void *arg) +{ + PIPELINE_WROKER_MULTI_STAGE_BURST_INIT; + const uint8_t nb_stages = t->opt->nb_stages + 1; + const uint8_t tx_queue = t->tx_service.queue_id; + + while (t->done == false) { + uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev, + BURST_SIZE, 0); + + if (!nb_rx) { + rte_pause(); + continue; + } + + for (i = 0; i < nb_rx; i++) { + rte_prefetch0(ev[i + 1].mbuf); + cq_id = ev[i].queue_id % nb_stages; + + if (cq_id == last_queue) { + ev[i].queue_id = tx_queue; + pipeline_fwd_event(&ev[i], + RTE_SCHED_TYPE_ATOMIC); + w->processed_pkts++; + } else { + ev[i].queue_id++; + pipeline_fwd_event(&ev[i], + sched_type_list[cq_id]); + } + } + + pipeline_event_enqueue_burst(dev, port, ev, nb_rx); + } + return 0; +} + +static int +worker_wrapper(void *arg) +{ + struct worker_data *w = arg; + struct evt_options *opt = w->t->opt; + const bool burst = evt_has_burst_mode(w->dev_id); + const bool mt_safe = !w->t->mt_unsafe; + const uint8_t nb_stages = opt->nb_stages; + RTE_SET_USED(opt); + + if (nb_stages == 1) { + if (!burst && mt_safe) + return pipeline_queue_worker_single_stage_tx(arg); + else if (!burst && !mt_safe) + return pipeline_queue_worker_single_stage_fwd(arg); + else if (burst && mt_safe) + return pipeline_queue_worker_single_stage_burst_tx(arg); + else if (burst && !mt_safe) + return pipeline_queue_worker_single_stage_burst_fwd( + arg); + } else { + if (!burst && mt_safe) + return pipeline_queue_worker_multi_stage_tx(arg); + else if (!burst && !mt_safe) + return pipeline_queue_worker_multi_stage_fwd(arg); + else if (burst && mt_safe) + return pipeline_queue_worker_multi_stage_burst_tx(arg); + else if (burst && !mt_safe) + return 
pipeline_queue_worker_multi_stage_burst_fwd(arg); + + } + rte_panic("invalid worker\n"); +} + +static int +pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt) +{ + struct test_pipeline *t = evt_test_priv(test); + + if (t->mt_unsafe) + rte_service_component_runstate_set(t->tx_service.service_id, 1); + return pipeline_launch_lcores(test, opt, worker_wrapper); +} + +static int +pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt) +{ + int ret; + int nb_ports; + int nb_queues; + int nb_stages = opt->nb_stages; + uint8_t queue; + struct rte_event_dev_info info; + struct test_pipeline *t = evt_test_priv(test); + uint8_t tx_evqueue_id = 0; + uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV]; + uint8_t nb_worker_queues = 0; + + nb_ports = evt_nr_active_lcores(opt->wlcores); + nb_queues = rte_eth_dev_count() * (nb_stages); + + /* Extra port for Tx service. */ + if (t->mt_unsafe) { + tx_evqueue_id = nb_queues; + nb_ports++; + nb_queues++; + } else + nb_queues += rte_eth_dev_count(); + + rte_event_dev_info_get(opt->dev_id, &info); + + const struct rte_event_dev_config config = { + .nb_event_queues = nb_queues, + .nb_event_ports = nb_ports, + .nb_events_limit = info.max_num_events, + .nb_event_queue_flows = opt->nb_flows, + .nb_event_port_dequeue_depth = + info.max_event_port_dequeue_depth, + .nb_event_port_enqueue_depth = + info.max_event_port_enqueue_depth, + }; + ret = rte_event_dev_configure(opt->dev_id, &config); + if (ret) { + evt_err("failed to configure eventdev %d", opt->dev_id); + return ret; + } + + struct rte_event_queue_conf q_conf = { + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .nb_atomic_flows = opt->nb_flows, + .nb_atomic_order_sequences = opt->nb_flows, + }; + /* queue configurations */ + for (queue = 0; queue < nb_queues; queue++) { + uint8_t slot; + + if (!t->mt_unsafe) { + slot = queue % (nb_stages + 1); + q_conf.schedule_type = slot == nb_stages ? 
+ RTE_SCHED_TYPE_ATOMIC : + opt->sched_type_list[slot]; + } else { + slot = queue % nb_stages; + + if (queue == tx_evqueue_id) { + q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC; + q_conf.event_queue_cfg = + RTE_EVENT_QUEUE_CFG_SINGLE_LINK; + } else { + q_conf.schedule_type = + opt->sched_type_list[slot]; + queue_arr[nb_worker_queues] = queue; + nb_worker_queues++; + } + } + + ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf); + if (ret) { + evt_err("failed to setup queue=%d", queue); + return ret; + } + } + + if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth) + opt->wkr_deq_dep = info.max_event_port_dequeue_depth; + + /* port configuration */ + const struct rte_event_port_conf p_conf = { + .dequeue_depth = opt->wkr_deq_dep, + .enqueue_depth = info.max_event_port_dequeue_depth, + .new_event_threshold = info.max_num_events, + }; + + /* + * If tx is multi thread safe then allow workers to do Tx else use Tx + * service to Tx packets. + */ + if (t->mt_unsafe) { + ret = pipeline_event_port_setup(test, opt, queue_arr, + nb_worker_queues, p_conf); + if (ret) + return ret; + + ret = pipeline_event_tx_service_setup(test, opt, tx_evqueue_id, + nb_ports - 1, p_conf); + + } else + ret = pipeline_event_port_setup(test, opt, NULL, nb_queues, + p_conf); + + if (ret) + return ret; + /* + * The pipelines are setup in the following manner: + * + * eth_dev_count = 2, nb_stages = 2. + * + * Multi thread safe : + * queues = 6 + * stride = 3 + * + * event queue pipelines: + * eth0 -> q0 -> q1 -> (q2->tx) + * eth1 -> q3 -> q4 -> (q5->tx) + * + * q2, q5 configured as ATOMIC + * + * Multi thread unsafe : + * queues = 5 + * stride = 2 + * + * event queue pipelines: + * eth0 -> q0 -> q1 + * } (q4->tx) Tx service + * eth1 -> q2 -> q3 + * + * q4 configured as SINGLE_LINK|ATOMIC + */ + ret = pipeline_event_rx_adapter_setup(opt, + t->mt_unsafe ? 
nb_stages : nb_stages + 1, p_conf); + if (ret) + return ret; + + if (!evt_has_distributed_sched(opt->dev_id)) { + uint32_t service_id; + rte_event_dev_service_id_get(opt->dev_id, &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("No service lcore found to run event dev."); + return ret; + } + } + + ret = rte_event_dev_start(opt->dev_id); + if (ret) { + evt_err("failed to start eventdev %d", opt->dev_id); + return ret; + } + + return 0; +} + +static void +pipeline_queue_opt_dump(struct evt_options *opt) +{ + pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt)); +} + +static int +pipeline_queue_opt_check(struct evt_options *opt) +{ + return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt)); +} + +static bool +pipeline_queue_capability_check(struct evt_options *opt) +{ + struct rte_event_dev_info dev_info; + + rte_event_dev_info_get(opt->dev_id, &dev_info); + if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) || + dev_info.max_event_ports < + evt_nr_active_lcores(opt->wlcores)) { + evt_err("not enough eventdev queues=%d/%d or ports=%d/%d", + pipeline_queue_nb_event_queues(opt), + dev_info.max_event_queues, + evt_nr_active_lcores(opt->wlcores), + dev_info.max_event_ports); + } + + return true; +} + +static const struct evt_test_ops pipeline_queue = { + .cap_check = pipeline_queue_capability_check, + .opt_check = pipeline_queue_opt_check, + .opt_dump = pipeline_queue_opt_dump, + .test_setup = pipeline_test_setup, + .mempool_setup = pipeline_mempool_setup, + .ethdev_setup = pipeline_ethdev_setup, + .eventdev_setup = pipeline_queue_eventdev_setup, + .launch_lcores = pipeline_queue_launch_lcores, + .eventdev_destroy = pipeline_eventdev_destroy, + .mempool_destroy = pipeline_mempool_destroy, + .ethdev_destroy = pipeline_ethdev_destroy, + .test_result = pipeline_test_result, + .test_destroy = pipeline_test_destroy, +}; + +EVT_TEST_REGISTER(pipeline_queue); diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile 
index d21308fc..ed588ab6 100644 --- a/app/test-pmd/Makefile +++ b/app/test-pmd/Makefile @@ -1,33 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2015 Intel Corporation. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2015 Intel Corporation include $(RTE_SDK)/mk/rte.vars.mk @@ -38,6 +10,7 @@ ifeq ($(CONFIG_RTE_TEST_PMD),y) # APP = testpmd +CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) @@ -71,6 +44,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y) LDLIBS += -lrte_pmd_bond endif +ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y) +LDLIBS += -lrte_pmd_dpaa +endif + ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y) LDLIBS += -lrte_pmd_ixgbe endif @@ -83,10 +60,6 @@ ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD),y) LDLIBS += -lrte_pmd_bnxt endif -ifeq ($(CONFIG_RTE_LIBRTE_PMD_XENVIRT),y) -LDLIBS += -lrte_pmd_xenvirt -endif - ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC),y) LDLIBS += -lrte_pmd_softnic endif diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index f71d9630..d1dc1de6 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -1,35 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * Copyright(c) 2014 6WIND S.A. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright(c) 2014 6WIND S.A. */ #include <stdarg.h> @@ -89,6 +60,9 @@ #include <rte_eth_bond.h> #include <rte_eth_bond_8023ad.h> #endif +#ifdef RTE_LIBRTE_DPAA_PMD +#include <rte_pmd_dpaa.h> +#endif #ifdef RTE_LIBRTE_IXGBE_PMD #include <rte_pmd_ixgbe.h> #endif @@ -271,6 +245,9 @@ static void cmd_help_long_parsed(void *parsed_result, "set verbose (level)\n" " Set the debug verbosity level X.\n\n" + "set log global|(type) (level)\n" + " Set the log level.\n\n" + "set nbport (num)\n" " Set number of ports.\n\n" @@ -486,6 +463,9 @@ static void cmd_help_long_parsed(void *parsed_result, "set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n" " Set the MAC address for a VF from the PF.\n\n" + "set eth-peer (port_id) (peer_addr)\n" + " set the peer address for certain port.\n\n" + "set port (port_id) uta (mac_address|all) (on|off)\n" " Add/Remove a or all unicast hash filter(s)" "from port X.\n\n" @@ -665,10 +645,10 @@ static void cmd_help_long_parsed(void *parsed_result, " Set default traffic Management hierarchy on a port\n\n" #endif - "ddp add (port_id) (profile_path[,output_path])\n" + 
"ddp add (port_id) (profile_path[,backup_profile_path])\n" " Load a profile package on a port\n\n" - "ddp del (port_id) (profile_path)\n" + "ddp del (port_id) (backup_profile_path)\n" " Delete a profile package from a port\n\n" "ptype mapping get (port_id) (valid_only)\n" @@ -698,7 +678,10 @@ static void cmd_help_long_parsed(void *parsed_result, "set port (port_id) queue-region flush (on|off)\n" " flush all queue region related configuration\n\n" - "add port meter profile srtcm_rfc2697 (port_id) (profile_id) (cir) (cbs) (ebs) (color_aware)\n" + "show port meter cap (port_id)\n" + " Show port meter capability information\n\n" + + "add port meter profile srtcm_rfc2697 (port_id) (profile_id) (cir) (cbs) (ebs)\n" " meter profile add - srtcm rfc 2697\n\n" "add port meter profile trtcm_rfc2698 (port_id) (profile_id) (cir) (pir) (cbs) (pbs)\n" @@ -710,16 +693,30 @@ static void cmd_help_long_parsed(void *parsed_result, "del port meter profile (port_id) (profile_id)\n" " meter profile delete\n\n" - "set port meter (port_id) (mtr_id) (profile_id) (g_action) (y_action) (r_action) (stats_mask) (shared)\n" + "create port meter (port_id) (mtr_id) (profile_id) (meter_enable)\n" + "(g_action) (y_action) (r_action) (stats_mask) (shared)\n" + "(use_pre_meter_color) [(dscp_tbl_entry0) (dscp_tbl_entry1)...\n" + "(dscp_tbl_entry63)]\n" " meter create\n\n" + "enable port meter (port_id) (mtr_id)\n" + " meter enable\n\n" + + "disable port meter (port_id) (mtr_id)\n" + " meter disable\n\n" + "del port meter (port_id) (mtr_id)\n" " meter delete\n\n" "set port meter profile (port_id) (mtr_id) (profile_id)\n" " meter update meter profile\n\n" - "set port meter policer action (port_id) (mtr_id) (color) (action)\n" + "set port meter dscp table (port_id) (mtr_id) [(dscp_tbl_entry0)\n" + "(dscp_tbl_entry1)...(dscp_tbl_entry63)]\n" + " update meter dscp table entries\n\n" + + "set port meter policer action (port_id) (mtr_id) (action_mask)\n" + "(action0) [(action1) (action2)]\n" " meter update 
policer action\n\n" "set port meter stats mask (port_id) (mtr_id) (stats_mask)\n" @@ -864,6 +861,15 @@ static void cmd_help_long_parsed(void *parsed_result, "port config (port_id) pctype mapping update" " (pctype_id_0[,pctype_id_1]*) (flow_type_id)\n" " Update a flow type to pctype mapping item on a port\n\n" + + "port config (port_id) pctype (pctype_id) hash_inset|" + "fdir_inset|fdir_flx_inset get|set|clear field\n" + " (field_idx)\n" + " Configure RSS|FDIR|FDIR_FLX input set for some pctype\n\n" + + "port config (port_id) pctype (pctype_id) hash_inset|" + "fdir_inset|fdir_flx_inset clear all" + " Clear RSS|FDIR|FDIR_FLX input set completely for some pctype\n\n" ); } @@ -979,6 +985,11 @@ static void cmd_help_long_parsed(void *parsed_result, " queue (queue_id) fd_id (fd_id_value)\n" " Add/Del a Tunnel flow director filter.\n\n" + "flow_director_filter (port_id) mode raw (add|del|update)" + " flow (flow_id) (drop|fwd) queue (queue_id)" + " fd_id (fd_id_value) packet (packet file name)\n" + " Add/Del a raw type flow director filter.\n\n" + "flush_flow_director (port_id)\n" " Flush all flow director entries of a device.\n\n" @@ -1501,6 +1512,8 @@ cmd_config_rx_tx_parsed(void *parsed_result, printf("Warning: Either rx or tx queues should be non zero\n"); return; } + if (check_nb_rxq(res->value) != 0) + return; nb_rxq = res->value; } else if (!strcmp(res->name, "txq")) { @@ -1508,6 +1521,8 @@ cmd_config_rx_tx_parsed(void *parsed_result, printf("Warning: Either rx or tx queues should be non zero\n"); return; } + if (check_nb_txq(res->value) != 0) + return; nb_txq = res->value; } else if (!strcmp(res->name, "rxd")) { @@ -1577,29 +1592,36 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, __attribute__((unused)) void *data) { struct cmd_config_max_pkt_len_result *res = parsed_result; + portid_t pid; if (!all_ports_stopped()) { printf("Please stop all ports first\n"); return; } - if (!strcmp(res->name, "max-pkt-len")) { - if (res->value < ETHER_MIN_LEN) { - 
printf("max-pkt-len can not be less than %d\n", - ETHER_MIN_LEN); + RTE_ETH_FOREACH_DEV(pid) { + struct rte_port *port = &ports[pid]; + uint64_t rx_offloads = port->dev_conf.rxmode.offloads; + + if (!strcmp(res->name, "max-pkt-len")) { + if (res->value < ETHER_MIN_LEN) { + printf("max-pkt-len can not be less than %d\n", + ETHER_MIN_LEN); + return; + } + if (res->value == port->dev_conf.rxmode.max_rx_pkt_len) + return; + + port->dev_conf.rxmode.max_rx_pkt_len = res->value; + if (res->value > ETHER_MAX_LEN) + rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + port->dev_conf.rxmode.offloads = rx_offloads; + } else { + printf("Unknown parameter\n"); return; } - if (res->value == rx_mode.max_rx_pkt_len) - return; - - rx_mode.max_rx_pkt_len = res->value; - if (res->value > ETHER_MAX_LEN) - rx_mode.jumbo_frame = 1; - else - rx_mode.jumbo_frame = 0; - } else { - printf("Unknown parameter\n"); - return; } init_port_config(); @@ -1703,100 +1725,107 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result, __attribute__((unused)) void *data) { struct cmd_config_rx_mode_flag *res = parsed_result; + portid_t pid; if (!all_ports_stopped()) { printf("Please stop all ports first\n"); return; } - if (!strcmp(res->name, "crc-strip")) { - if (!strcmp(res->value, "on")) - rx_mode.hw_strip_crc = 1; - else if (!strcmp(res->value, "off")) - rx_mode.hw_strip_crc = 0; - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "scatter")) { - if (!strcmp(res->value, "on")) - rx_mode.enable_scatter = 1; - else if (!strcmp(res->value, "off")) - rx_mode.enable_scatter = 0; - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "rx-cksum")) { - if (!strcmp(res->value, "on")) - rx_mode.hw_ip_checksum = 1; - else if (!strcmp(res->value, "off")) - rx_mode.hw_ip_checksum = 0; - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "rx-timestamp")) { - if 
(!strcmp(res->value, "on")) - rx_mode.hw_timestamp = 1; - else if (!strcmp(res->value, "off")) - rx_mode.hw_timestamp = 0; - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "hw-vlan")) { - if (!strcmp(res->value, "on")) { - rx_mode.hw_vlan_filter = 1; - rx_mode.hw_vlan_strip = 1; - } - else if (!strcmp(res->value, "off")) { - rx_mode.hw_vlan_filter = 0; - rx_mode.hw_vlan_strip = 0; - } - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "hw-vlan-filter")) { - if (!strcmp(res->value, "on")) - rx_mode.hw_vlan_filter = 1; - else if (!strcmp(res->value, "off")) - rx_mode.hw_vlan_filter = 0; - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "hw-vlan-strip")) { - if (!strcmp(res->value, "on")) - rx_mode.hw_vlan_strip = 1; - else if (!strcmp(res->value, "off")) - rx_mode.hw_vlan_strip = 0; - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "hw-vlan-extend")) { - if (!strcmp(res->value, "on")) - rx_mode.hw_vlan_extend = 1; - else if (!strcmp(res->value, "off")) - rx_mode.hw_vlan_extend = 0; - else { - printf("Unknown parameter\n"); - return; - } - } else if (!strcmp(res->name, "drop-en")) { - if (!strcmp(res->value, "on")) - rx_drop_en = 1; - else if (!strcmp(res->value, "off")) - rx_drop_en = 0; - else { + RTE_ETH_FOREACH_DEV(pid) { + struct rte_port *port; + uint64_t rx_offloads; + + port = &ports[pid]; + rx_offloads = port->dev_conf.rxmode.offloads; + if (!strcmp(res->name, "crc-strip")) { + if (!strcmp(res->value, "on")) + rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + else if (!strcmp(res->value, "off")) + rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; + else { + printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "scatter")) { + if (!strcmp(res->value, "on")) { + rx_offloads |= DEV_RX_OFFLOAD_SCATTER; + } else if (!strcmp(res->value, "off")) { + rx_offloads &= ~DEV_RX_OFFLOAD_SCATTER; + } else { + 
printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "rx-cksum")) { + if (!strcmp(res->value, "on")) + rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM; + else if (!strcmp(res->value, "off")) + rx_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM; + else { + printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "rx-timestamp")) { + if (!strcmp(res->value, "on")) + rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP; + else if (!strcmp(res->value, "off")) + rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP; + else { + printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "hw-vlan")) { + if (!strcmp(res->value, "on")) { + rx_offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP); + } else if (!strcmp(res->value, "off")) { + rx_offloads &= ~(DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP); + } else { + printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "hw-vlan-filter")) { + if (!strcmp(res->value, "on")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + else if (!strcmp(res->value, "off")) + rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; + else { + printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "hw-vlan-strip")) { + if (!strcmp(res->value, "on")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + else if (!strcmp(res->value, "off")) + rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + else { + printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "hw-vlan-extend")) { + if (!strcmp(res->value, "on")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + else if (!strcmp(res->value, "off")) + rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; + else { + printf("Unknown parameter\n"); + return; + } + } else if (!strcmp(res->name, "drop-en")) { + if (!strcmp(res->value, "on")) + rx_drop_en = 1; + else if (!strcmp(res->value, "off")) + rx_drop_en = 0; + else { + printf("Unknown parameter\n"); + return; + } + } else { printf("Unknown parameter\n"); return; } - } else { - 
printf("Unknown parameter\n"); - return; + port->dev_conf.rxmode.offloads = rx_offloads; } init_port_config(); @@ -3000,6 +3029,55 @@ cmdline_parse_inst_t cmd_set_numbers = { }, }; +/* *** SET LOG LEVEL CONFIGURATION *** */ + +struct cmd_set_log_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t log; + cmdline_fixed_string_t type; + uint32_t level; +}; + +static void +cmd_set_log_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_log_result *res; + int ret; + + res = parsed_result; + if (!strcmp(res->type, "global")) + rte_log_set_global_level(res->level); + else { + ret = rte_log_set_level_regexp(res->type, res->level); + if (ret < 0) + printf("Unable to set log level\n"); + } +} + +cmdline_parse_token_string_t cmd_set_log_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_log_result, set, "set"); +cmdline_parse_token_string_t cmd_set_log_log = + TOKEN_STRING_INITIALIZER(struct cmd_set_log_result, log, "log"); +cmdline_parse_token_string_t cmd_set_log_type = + TOKEN_STRING_INITIALIZER(struct cmd_set_log_result, type, NULL); +cmdline_parse_token_num_t cmd_set_log_level = + TOKEN_NUM_INITIALIZER(struct cmd_set_log_result, level, UINT32); + +cmdline_parse_inst_t cmd_set_log = { + .f = cmd_set_log_parsed, + .data = NULL, + .help_str = "set log global|<type> <level>", + .tokens = { + (void *)&cmd_set_log_set, + (void *)&cmd_set_log_log, + (void *)&cmd_set_log_type, + (void *)&cmd_set_log_level, + NULL, + }, +}; + /* *** SET SEGMENT LENGTHS OF TXONLY PACKETS *** */ struct cmd_set_txpkts_result { @@ -3087,74 +3165,6 @@ cmdline_parse_inst_t cmd_set_txsplit = { }, }; -/* *** CONFIG TX QUEUE FLAGS *** */ - -struct cmd_config_txqflags_result { - cmdline_fixed_string_t port; - cmdline_fixed_string_t config; - cmdline_fixed_string_t all; - cmdline_fixed_string_t what; - int32_t hexvalue; -}; - -static void cmd_config_txqflags_parsed(void *parsed_result, - __attribute__((unused)) struct 
cmdline *cl, - __attribute__((unused)) void *data) -{ - struct cmd_config_txqflags_result *res = parsed_result; - - if (!all_ports_stopped()) { - printf("Please stop all ports first\n"); - return; - } - - if (strcmp(res->what, "txqflags")) { - printf("Unknown parameter\n"); - return; - } - - if (res->hexvalue >= 0) { - txq_flags = res->hexvalue; - } else { - printf("txqflags must be >= 0\n"); - return; - } - - init_port_config(); - - cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); -} - -cmdline_parse_token_string_t cmd_config_txqflags_port = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, port, - "port"); -cmdline_parse_token_string_t cmd_config_txqflags_config = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, config, - "config"); -cmdline_parse_token_string_t cmd_config_txqflags_all = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, all, - "all"); -cmdline_parse_token_string_t cmd_config_txqflags_what = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, what, - "txqflags"); -cmdline_parse_token_num_t cmd_config_txqflags_value = - TOKEN_NUM_INITIALIZER(struct cmd_config_txqflags_result, - hexvalue, INT32); - -cmdline_parse_inst_t cmd_config_txqflags = { - .f = cmd_config_txqflags_parsed, - .data = NULL, - .help_str = "port config all txqflags <value>", - .tokens = { - (void *)&cmd_config_txqflags_port, - (void *)&cmd_config_txqflags_config, - (void *)&cmd_config_txqflags_all, - (void *)&cmd_config_txqflags_what, - (void *)&cmd_config_txqflags_value, - NULL, - }, -}; - /* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */ struct cmd_rx_vlan_filter_all_result { cmdline_fixed_string_t rx_vlan; @@ -3352,7 +3362,7 @@ cmdline_parse_token_num_t cmd_vlan_tpid_tpid = tp_id, UINT16); cmdline_parse_token_num_t cmd_vlan_tpid_portid = TOKEN_NUM_INITIALIZER(struct cmd_vlan_tpid_result, - port_id, UINT8); + port_id, UINT16); cmdline_parse_inst_t cmd_vlan_tpid = { .f = cmd_vlan_tpid_parsed, @@ -3434,7 
+3444,14 @@ cmd_tx_vlan_set_parsed(void *parsed_result, { struct cmd_tx_vlan_set_result *res = parsed_result; + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + tx_vlan_set(res->port_id, res->vlan_id); + + cmd_reconfig_device_queue(res->port_id, 1, 1); } cmdline_parse_token_string_t cmd_tx_vlan_set_tx_vlan = @@ -3481,7 +3498,14 @@ cmd_tx_vlan_set_qinq_parsed(void *parsed_result, { struct cmd_tx_vlan_set_qinq_result *res = parsed_result; + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + tx_qinq_set(res->port_id, res->vlan_id, res->vlan_id_outer); + + cmd_reconfig_device_queue(res->port_id, 1, 1); } cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_tx_vlan = @@ -3587,7 +3611,14 @@ cmd_tx_vlan_reset_parsed(void *parsed_result, { struct cmd_tx_vlan_reset_result *res = parsed_result; + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + tx_vlan_reset(res->port_id); + + cmd_reconfig_device_queue(res->port_id, 1, 1); } cmdline_parse_token_string_t cmd_tx_vlan_reset_tx_vlan = @@ -3627,45 +3658,45 @@ static void csum_show(int port_id) { struct rte_eth_dev_info dev_info; - uint16_t ol_flags; + uint64_t tx_offloads; - ol_flags = ports[port_id].tx_ol_flags; + tx_offloads = ports[port_id].dev_conf.txmode.offloads; printf("Parse tunnel is %s\n", - (ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) ? "on" : "off"); + (ports[port_id].parse_tunnel) ? "on" : "off"); printf("IP checksum offload is %s\n", - (ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) ? "hw" : "sw"); + (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw"); printf("UDP checksum offload is %s\n", - (ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw"); + (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw"); printf("TCP checksum offload is %s\n", - (ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) ? 
"hw" : "sw"); + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw"); printf("SCTP checksum offload is %s\n", - (ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw"); + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw"); printf("Outer-Ip checksum offload is %s\n", - (ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ? "hw" : "sw"); + (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw"); /* display warnings if configuration is not supported by the NIC */ rte_eth_dev_info_get(port_id, &dev_info); - if ((ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) && + if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) && (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) { printf("Warning: hardware IP checksum enabled but not " "supported by port %d\n", port_id); } - if ((ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) && + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) && (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) { printf("Warning: hardware UDP checksum enabled but not " "supported by port %d\n", port_id); } - if ((ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) && + if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) && (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) { printf("Warning: hardware TCP checksum enabled but not " "supported by port %d\n", port_id); } - if ((ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) && + if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) && (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) { printf("Warning: hardware SCTP checksum enabled but not " "supported by port %d\n", port_id); } - if ((ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) && + if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) && (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) { printf("Warning: hardware outer IP checksum enabled but not " "supported by port %d\n", port_id); @@ -3679,36 +3710,78 @@ cmd_csum_parsed(void *parsed_result, { struct cmd_csum_result *res = parsed_result; int hw = 0; - uint16_t mask = 0; + uint64_t 
csum_offloads = 0; + struct rte_eth_dev_info dev_info; if (port_id_is_invalid(res->port_id, ENABLED_WARN)) { printf("invalid port %d\n", res->port_id); return; } + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + rte_eth_dev_info_get(res->port_id, &dev_info); if (!strcmp(res->mode, "set")) { if (!strcmp(res->hwsw, "hw")) hw = 1; if (!strcmp(res->proto, "ip")) { - mask = TESTPMD_TX_OFFLOAD_IP_CKSUM; + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_IPV4_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + } else { + printf("IP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "udp")) { - mask = TESTPMD_TX_OFFLOAD_UDP_CKSUM; + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_UDP_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + } else { + printf("UDP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "tcp")) { - mask = TESTPMD_TX_OFFLOAD_TCP_CKSUM; + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_TCP_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + } else { + printf("TCP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "sctp")) { - mask = TESTPMD_TX_OFFLOAD_SCTP_CKSUM; + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_SCTP_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM; + } else { + printf("SCTP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "outer-ip")) { - mask = TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM; + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { + csum_offloads |= + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + } else { + printf("Outer IP checksum offload is not " + "supported by port %u\n", res->port_id); + } } - if (hw) - ports[res->port_id].tx_ol_flags |= mask; - else - 
ports[res->port_id].tx_ol_flags &= (~mask); + if (hw) { + ports[res->port_id].dev_conf.txmode.offloads |= + csum_offloads; + } else { + ports[res->port_id].dev_conf.txmode.offloads &= + (~csum_offloads); + } } csum_show(res->port_id); + + cmd_reconfig_device_queue(res->port_id, 1, 1); } cmdline_parse_token_string_t cmd_csum_csum = @@ -3778,11 +3851,9 @@ cmd_csum_tunnel_parsed(void *parsed_result, return; if (!strcmp(res->onoff, "on")) - ports[res->port_id].tx_ol_flags |= - TESTPMD_TX_OFFLOAD_PARSE_TUNNEL; + ports[res->port_id].parse_tunnel = 1; else - ports[res->port_id].tx_ol_flags &= - (~TESTPMD_TX_OFFLOAD_PARSE_TUNNEL); + ports[res->port_id].parse_tunnel = 0; csum_show(res->port_id); } @@ -3832,15 +3903,32 @@ cmd_tso_set_parsed(void *parsed_result, if (port_id_is_invalid(res->port_id, ENABLED_WARN)) return; + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } if (!strcmp(res->mode, "set")) ports[res->port_id].tso_segsz = res->tso_segsz; - if (ports[res->port_id].tso_segsz == 0) + rte_eth_dev_info_get(res->port_id, &dev_info); + if ((ports[res->port_id].tso_segsz != 0) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) { + printf("Error: TSO is not supported by port %d\n", + res->port_id); + return; + } + + if (ports[res->port_id].tso_segsz == 0) { + ports[res->port_id].dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_TCP_TSO; printf("TSO for non-tunneled packets is disabled\n"); - else + } else { + ports[res->port_id].dev_conf.txmode.offloads |= + DEV_TX_OFFLOAD_TCP_TSO; printf("TSO segment size for non-tunneled packets is %d\n", ports[res->port_id].tso_segsz); + } /* display warnings if configuration is not supported by the NIC */ rte_eth_dev_info_get(res->port_id, &dev_info); @@ -3849,6 +3937,8 @@ cmd_tso_set_parsed(void *parsed_result, printf("Warning: TSO enabled but not " "supported by port %d\n", res->port_id); } + + cmd_reconfig_device_queue(res->port_id, 1, 1); } 
cmdline_parse_token_string_t cmd_tso_set_tso = @@ -3905,24 +3995,25 @@ struct cmd_tunnel_tso_set_result { portid_t port_id; }; -static void +static struct rte_eth_dev_info check_tunnel_tso_nic_support(portid_t port_id) { struct rte_eth_dev_info dev_info; rte_eth_dev_info_get(port_id, &dev_info); if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) - printf("Warning: TSO enabled but VXLAN TUNNEL TSO not " - "supported by port %d\n", port_id); + printf("Warning: VXLAN TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO)) - printf("Warning: TSO enabled but GRE TUNNEL TSO not " - "supported by port %d\n", port_id); + printf("Warning: GRE TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO)) - printf("Warning: TSO enabled but IPIP TUNNEL TSO not " - "supported by port %d\n", port_id); + printf("Warning: IPIP TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) - printf("Warning: TSO enabled but GENEVE TUNNEL TSO not " - "supported by port %d\n", port_id); + printf("Warning: GENEVE TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + return dev_info; } static void @@ -3931,16 +4022,34 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, __attribute__((unused)) void *data) { struct cmd_tunnel_tso_set_result *res = parsed_result; + struct rte_eth_dev_info dev_info; if (port_id_is_invalid(res->port_id, ENABLED_WARN)) return; + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } if (!strcmp(res->mode, "set")) ports[res->port_id].tunnel_tso_segsz = res->tso_segsz; - if (ports[res->port_id].tunnel_tso_segsz == 0) + dev_info = check_tunnel_tso_nic_support(res->port_id); + if (ports[res->port_id].tunnel_tso_segsz == 0) { + 
ports[res->port_id].dev_conf.txmode.offloads &= + ~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO); printf("TSO for tunneled packets is disabled\n"); - else { + } else { + uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + + ports[res->port_id].dev_conf.txmode.offloads |= + (tso_offloads & dev_info.tx_offload_capa); printf("TSO segment size for tunneled packets is %d\n", ports[res->port_id].tunnel_tso_segsz); @@ -3955,17 +4064,17 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, * is not necessary for IPv6 tunneled pkts because there's no * checksum in IP header anymore. */ - check_tunnel_tso_nic_support(res->port_id); - if (!(ports[res->port_id].tx_ol_flags & - TESTPMD_TX_OFFLOAD_PARSE_TUNNEL)) + if (!ports[res->port_id].parse_tunnel) printf("Warning: csum parse_tunnel must be set " "so that tunneled packets are recognized\n"); - if (!(ports[res->port_id].tx_ol_flags & - TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)) + if (!(ports[res->port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) printf("Warning: csum set outer-ip must be set to hw " "if outer L3 is IPv4; not necessary for IPv6\n"); } + + cmd_reconfig_device_queue(res->port_id, 1, 1); } cmdline_parse_token_string_t cmd_tunnel_tso_set_tso = @@ -5751,7 +5860,7 @@ cmdline_parse_token_string_t cmd_setpromisc_portall = "all"); cmdline_parse_token_num_t cmd_setpromisc_portnum = TOKEN_NUM_INITIALIZER(struct cmd_set_promisc_mode_result, port_num, - UINT8); + UINT16); cmdline_parse_token_string_t cmd_setpromisc_mode = TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, mode, "on#off"); @@ -7120,6 +7229,50 @@ cmdline_parse_inst_t cmd_mac_addr = { }, }; +/* *** SET THE PEER ADDRESS FOR CERTAIN PORT *** */ +struct cmd_eth_peer_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t eth_peer; + portid_t port_id; + 
cmdline_fixed_string_t peer_addr; +}; + +static void cmd_set_eth_peer_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_eth_peer_result *res = parsed_result; + + if (test_done == 0) { + printf("Please stop forwarding first\n"); + return; + } + if (!strcmp(res->eth_peer, "eth-peer")) { + set_fwd_eth_peer(res->port_id, res->peer_addr); + fwd_config_setup(); + } +} +cmdline_parse_token_string_t cmd_eth_peer_set = + TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, set, "set"); +cmdline_parse_token_string_t cmd_eth_peer = + TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, eth_peer, "eth-peer"); +cmdline_parse_token_num_t cmd_eth_peer_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_eth_peer_result, port_id, UINT16); +cmdline_parse_token_string_t cmd_eth_peer_addr = + TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, peer_addr, NULL); + +cmdline_parse_inst_t cmd_set_fwd_eth_peer = { + .f = cmd_set_eth_peer_parsed, + .data = NULL, + .help_str = "set eth-peer <port_id> <peer_mac>", + .tokens = { + (void *)&cmd_eth_peer_set, + (void *)&cmd_eth_peer, + (void *)&cmd_eth_peer_port_id, + (void *)&cmd_eth_peer_addr, + NULL, + }, +}; /* *** CONFIGURE QUEUE STATS COUNTER MAPPINGS *** */ struct cmd_set_qmap_result { @@ -9747,11 +9900,11 @@ struct cmd_flow_director_result { uint16_t port_dst; cmdline_fixed_string_t verify_tag; uint32_t verify_tag_value; - cmdline_ipaddr_t tos; + cmdline_fixed_string_t tos; uint8_t tos_value; - cmdline_ipaddr_t proto; + cmdline_fixed_string_t proto; uint8_t proto_value; - cmdline_ipaddr_t ttl; + cmdline_fixed_string_t ttl; uint8_t ttl_value; cmdline_fixed_string_t vlan; uint16_t vlan_value; @@ -9769,6 +9922,8 @@ struct cmd_flow_director_result { cmdline_fixed_string_t tunnel_type; cmdline_fixed_string_t tunnel_id; uint32_t tunnel_id_value; + cmdline_fixed_string_t packet; + char filepath[]; }; static inline int @@ -9918,8 +10073,62 @@ 
cmd_flow_director_filter_parsed(void *parsed_result, return; } } else { - if (strcmp(res->mode_value, "IP")) { - printf("Please set mode to IP.\n"); + if (!strcmp(res->mode_value, "raw")) { +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_flow_type_mapping + mapping[RTE_PMD_I40E_FLOW_TYPE_MAX]; + struct rte_pmd_i40e_pkt_template_conf conf; + uint16_t flow_type = str2flowtype(res->flow_type); + uint16_t i, port = res->port_id; + uint8_t add; + + memset(&conf, 0, sizeof(conf)); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) { + printf("Invalid flow type specified.\n"); + return; + } + ret = rte_pmd_i40e_flow_type_mapping_get(res->port_id, + mapping); + if (ret) + return; + if (mapping[flow_type].pctype == 0ULL) { + printf("Invalid flow type specified.\n"); + return; + } + for (i = 0; i < RTE_PMD_I40E_PCTYPE_MAX; i++) { + if (mapping[flow_type].pctype & (1ULL << i)) { + conf.input.pctype = i; + break; + } + } + + conf.input.packet = open_file(res->filepath, + &conf.input.length); + if (!conf.input.packet) + return; + if (!strcmp(res->drop, "drop")) + conf.action.behavior = + RTE_PMD_I40E_PKT_TEMPLATE_REJECT; + else + conf.action.behavior = + RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT; + conf.action.report_status = + RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID; + conf.action.rx_queue = res->queue_id; + conf.soft_id = res->fd_id_value; + add = strcmp(res->ops, "del") ? 
1 : 0; + ret = rte_pmd_i40e_flow_add_del_packet_template(port, + &conf, + add); + if (ret < 0) + printf("flow director config error: (%s)\n", + strerror(-ret)); + close_file(conf.input.packet); +#endif + return; + } else if (strcmp(res->mode_value, "IP")) { + printf("Please set mode to IP or raw.\n"); return; } entry.input.flow_type = str2flowtype(res->flow_type); @@ -10091,8 +10300,7 @@ cmdline_parse_token_string_t cmd_flow_director_flow = flow, "flow"); cmdline_parse_token_string_t cmd_flow_director_flow_type = TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, - flow_type, "ipv4-other#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#" - "ipv6-other#ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#l2_payload"); + flow_type, NULL); cmdline_parse_token_string_t cmd_flow_director_ether = TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, ether, "ether"); @@ -10184,6 +10392,9 @@ cmdline_parse_token_string_t cmd_flow_director_mode_mac_vlan = cmdline_parse_token_string_t cmd_flow_director_mode_tunnel = TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, mode_value, "Tunnel"); +cmdline_parse_token_string_t cmd_flow_director_mode_raw = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + mode_value, "raw"); cmdline_parse_token_string_t cmd_flow_director_mac = TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, mac, "mac"); @@ -10202,6 +10413,12 @@ cmdline_parse_token_string_t cmd_flow_director_tunnel_id = cmdline_parse_token_num_t cmd_flow_director_tunnel_id_value = TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, tunnel_id_value, UINT32); +cmdline_parse_token_string_t cmd_flow_director_packet = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + packet, "packet"); +cmdline_parse_token_string_t cmd_flow_director_filepath = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + filepath, NULL); cmdline_parse_inst_t cmd_add_del_ip_flow_director = { .f = cmd_flow_director_filter_parsed, @@ -10298,7 +10515,7 @@ cmdline_parse_inst_t 
cmd_add_del_sctp_flow_director = { (void *)&cmd_flow_director_flow_type, (void *)&cmd_flow_director_src, (void *)&cmd_flow_director_ip_src, - (void *)&cmd_flow_director_port_dst, + (void *)&cmd_flow_director_port_src, (void *)&cmd_flow_director_dst, (void *)&cmd_flow_director_ip_dst, (void *)&cmd_flow_director_port_dst, @@ -10405,6 +10622,30 @@ cmdline_parse_inst_t cmd_add_del_tunnel_flow_director = { }, }; +cmdline_parse_inst_t cmd_add_del_raw_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter ... : Add or delete a raw flow " + "director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_raw, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_flow, + (void *)&cmd_flow_director_flow_type, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + (void *)&cmd_flow_director_packet, + (void *)&cmd_flow_director_filepath, + NULL, + }, +}; + struct cmd_flush_flow_director_result { cmdline_fixed_string_t flush_flow_director; portid_t port_id; @@ -10681,7 +10922,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result, struct rte_eth_fdir_info fdir_info; struct rte_eth_fdir_flex_mask flex_mask; struct rte_port *port; - uint32_t flow_type_mask; + uint64_t flow_type_mask; uint16_t i; int ret; @@ -10734,7 +10975,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result, return; } for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { - if (flow_type_mask & (1 << i)) { + if (flow_type_mask & (1ULL << i)) { flex_mask.flow_type = i; fdir_set_flex_mask(res->port_id, &flex_mask); } @@ -10743,7 +10984,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result, return; } flex_mask.flow_type = str2flowtype(res->flow_type); - if (!(flow_type_mask & (1 << 
flex_mask.flow_type))) { + if (!(flow_type_mask & (1ULL << flex_mask.flow_type))) { printf("Flow type %s not supported on port %d\n", res->flow_type, res->port_id); return; @@ -11105,10 +11346,10 @@ cmd_get_hash_global_config_parsed(void *parsed_result, } for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { - idx = i / UINT32_BIT; - offset = i % UINT32_BIT; + idx = i / UINT64_BIT; + offset = i % UINT64_BIT; if (!(info.info.global_conf.valid_bit_mask[idx] & - (1UL << offset))) + (1ULL << offset))) continue; str = flowtype_to_str(i); if (!str) @@ -11116,7 +11357,7 @@ cmd_get_hash_global_config_parsed(void *parsed_result, printf("Symmetric hash is %s globally for flow type %s " "by port %d\n", ((info.info.global_conf.sym_hash_enable_mask[idx] & - (1UL << offset)) ? "enabled" : "disabled"), str, + (1ULL << offset)) ? "enabled" : "disabled"), str, res->port_id); } } @@ -11177,12 +11418,12 @@ cmd_set_hash_global_config_parsed(void *parsed_result, RTE_ETH_HASH_FUNCTION_DEFAULT; ftype = str2flowtype(res->flow_type); - idx = ftype / (CHAR_BIT * sizeof(uint32_t)); - offset = ftype % (CHAR_BIT * sizeof(uint32_t)); - info.info.global_conf.valid_bit_mask[idx] |= (1UL << offset); + idx = ftype / UINT64_BIT; + offset = ftype % UINT64_BIT; + info.info.global_conf.valid_bit_mask[idx] |= (1ULL << offset); if (!strcmp(res->enable, "enable")) info.info.global_conf.sym_hash_enable_mask[idx] |= - (1UL << offset); + (1ULL << offset); ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH, RTE_ETH_FILTER_SET, &info); if (ret < 0) @@ -12620,6 +12861,10 @@ cmd_set_tx_loopback_parsed( if (ret == -ENOTSUP) ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on); #endif +#ifdef RTE_LIBRTE_DPAA_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_dpaa_set_tx_loopback(res->port_id, is_on); +#endif switch (ret) { case 0: @@ -13004,19 +13249,29 @@ cmd_set_macsec_offload_on_parsed( portid_t port_id = res->port_id; int en = (strcmp(res->en_on_off, "on") == 0) ? 
1 : 0; int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0; + struct rte_eth_dev_info dev_info; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + if (!port_is_stopped(port_id)) { + printf("Please stop port %d first\n", port_id); + return; + } - ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC; + rte_eth_dev_info_get(port_id, &dev_info); + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) { #ifdef RTE_LIBRTE_IXGBE_PMD - ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp); + ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp); #endif + } RTE_SET_USED(en); RTE_SET_USED(rp); switch (ret) { case 0: + ports[port_id].dev_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MACSEC_INSERT; + cmd_reconfig_device_queue(port_id, 1, 1); break; case -ENODEV: printf("invalid port_id %d\n", port_id); @@ -13087,18 +13342,27 @@ cmd_set_macsec_offload_off_parsed( { struct cmd_macsec_offload_off_result *res = parsed_result; int ret = -ENOTSUP; + struct rte_eth_dev_info dev_info; portid_t port_id = res->port_id; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + if (!port_is_stopped(port_id)) { + printf("Please stop port %d first\n", port_id); + return; + } - ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC; + rte_eth_dev_info_get(port_id, &dev_info); + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) { #ifdef RTE_LIBRTE_IXGBE_PMD - ret = rte_pmd_ixgbe_macsec_disable(port_id); + ret = rte_pmd_ixgbe_macsec_disable(port_id); #endif - + } switch (ret) { case 0: + ports[port_id].dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_MACSEC_INSERT; + cmd_reconfig_device_queue(port_id, 1, 1); break; case -ENODEV: printf("invalid port_id %d\n", port_id); @@ -14220,7 +14484,7 @@ cmd_ddp_add_parsed( } file_num = rte_strsplit(filepath, strlen(filepath), file_fld, 2, ','); - buff = open_ddp_package_file(file_fld[0], &size); + buff = open_file(file_fld[0], &size); if (!buff) { free((void *)filepath); return; @@ -14238,16 +14502,16 @@ cmd_ddp_add_parsed( else if (ret < 
0) printf("Failed to load profile.\n"); else if (file_num == 2) - save_ddp_package_file(file_fld[1], buff, size); + save_file(file_fld[1], buff, size); - close_ddp_package_file(buff); + close_file(buff); free((void *)filepath); } cmdline_parse_inst_t cmd_ddp_add = { .f = cmd_ddp_add_parsed, .data = NULL, - .help_str = "ddp add <port_id> <profile_path[,output_path]>", + .help_str = "ddp add <port_id> <profile_path[,backup_profile_path]>", .tokens = { (void *)&cmd_ddp_add_ddp, (void *)&cmd_ddp_add_add, @@ -14295,7 +14559,7 @@ cmd_ddp_del_parsed( return; } - buff = open_ddp_package_file(res->filepath, &size); + buff = open_file(res->filepath, &size); if (!buff) return; @@ -14311,13 +14575,13 @@ cmd_ddp_del_parsed( else if (ret < 0) printf("Failed to delete profile.\n"); - close_ddp_package_file(buff); + close_file(buff); } cmdline_parse_inst_t cmd_ddp_del = { .f = cmd_ddp_del_parsed, .data = NULL, - .help_str = "ddp del <port_id> <profile_path>", + .help_str = "ddp del <port_id> <backup_profile_path>", .tokens = { (void *)&cmd_ddp_del_ddp, (void *)&cmd_ddp_del_del, @@ -14371,7 +14635,7 @@ cmd_ddp_info_parsed( #endif - pkg = open_ddp_package_file(res->filepath, &pkg_size); + pkg = open_file(res->filepath, &pkg_size); if (!pkg) return; @@ -14548,7 +14812,7 @@ no_print_return: #endif if (ret == -ENOTSUP) printf("Function not supported in PMD driver\n"); - close_ddp_package_file(pkg); + close_file(pkg); } cmdline_parse_inst_t cmd_ddp_get_info = { @@ -14652,6 +14916,237 @@ cmdline_parse_inst_t cmd_ddp_get_list = { }, }; +/* Configure input set */ +struct cmd_cfg_input_set_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t cfg; + portid_t port_id; + cmdline_fixed_string_t pctype; + uint8_t pctype_id; + cmdline_fixed_string_t inset_type; + cmdline_fixed_string_t opt; + cmdline_fixed_string_t field; + uint8_t field_idx; +}; + +static void +cmd_cfg_input_set_parsed( + void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) 
void *data) +{ + struct cmd_cfg_input_set_result *res = parsed_result; +#ifdef RTE_LIBRTE_I40E_PMD + enum rte_pmd_i40e_inset_type inset_type = INSET_NONE; + struct rte_pmd_i40e_inset inset; +#endif + int ret = -ENOTSUP; + + if (res->port_id > nb_ports) { + printf("Invalid port, range is [0, %d]\n", nb_ports - 1); + return; + } + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + +#ifdef RTE_LIBRTE_I40E_PMD + if (!strcmp(res->inset_type, "hash_inset")) + inset_type = INSET_HASH; + else if (!strcmp(res->inset_type, "fdir_inset")) + inset_type = INSET_FDIR; + else if (!strcmp(res->inset_type, "fdir_flx_inset")) + inset_type = INSET_FDIR_FLX; + ret = rte_pmd_i40e_inset_get(res->port_id, res->pctype_id, + &inset, inset_type); + if (ret) { + printf("Failed to get input set.\n"); + return; + } + + if (!strcmp(res->opt, "get")) { + ret = rte_pmd_i40e_inset_field_get(inset.inset, + res->field_idx); + if (ret) + printf("Field index %d is enabled.\n", res->field_idx); + else + printf("Field index %d is disabled.\n", res->field_idx); + return; + } else if (!strcmp(res->opt, "set")) + ret = rte_pmd_i40e_inset_field_set(&inset.inset, + res->field_idx); + else if (!strcmp(res->opt, "clear")) + ret = rte_pmd_i40e_inset_field_clear(&inset.inset, + res->field_idx); + if (ret) { + printf("Failed to configure input set field.\n"); + return; + } + + ret = rte_pmd_i40e_inset_set(res->port_id, res->pctype_id, + &inset, inset_type); + if (ret) { + printf("Failed to set input set.\n"); + return; + } +#endif + + if (ret == -ENOTSUP) + printf("Function not supported\n"); +} + +cmdline_parse_token_string_t cmd_cfg_input_set_port = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + port, "port"); +cmdline_parse_token_string_t cmd_cfg_input_set_cfg = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + cfg, "config"); +cmdline_parse_token_num_t cmd_cfg_input_set_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_cfg_input_set_result, + 
port_id, UINT16); +cmdline_parse_token_string_t cmd_cfg_input_set_pctype = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + pctype, "pctype"); +cmdline_parse_token_num_t cmd_cfg_input_set_pctype_id = + TOKEN_NUM_INITIALIZER(struct cmd_cfg_input_set_result, + pctype_id, UINT8); +cmdline_parse_token_string_t cmd_cfg_input_set_inset_type = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + inset_type, + "hash_inset#fdir_inset#fdir_flx_inset"); +cmdline_parse_token_string_t cmd_cfg_input_set_opt = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + opt, "get#set#clear"); +cmdline_parse_token_string_t cmd_cfg_input_set_field = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + field, "field"); +cmdline_parse_token_num_t cmd_cfg_input_set_field_idx = + TOKEN_NUM_INITIALIZER(struct cmd_cfg_input_set_result, + field_idx, UINT8); + +cmdline_parse_inst_t cmd_cfg_input_set = { + .f = cmd_cfg_input_set_parsed, + .data = NULL, + .help_str = "port config <port_id> pctype <pctype_id> hash_inset|" + "fdir_inset|fdir_flx_inset get|set|clear field <field_idx>", + .tokens = { + (void *)&cmd_cfg_input_set_port, + (void *)&cmd_cfg_input_set_cfg, + (void *)&cmd_cfg_input_set_port_id, + (void *)&cmd_cfg_input_set_pctype, + (void *)&cmd_cfg_input_set_pctype_id, + (void *)&cmd_cfg_input_set_inset_type, + (void *)&cmd_cfg_input_set_opt, + (void *)&cmd_cfg_input_set_field, + (void *)&cmd_cfg_input_set_field_idx, + NULL, + }, +}; + +/* Clear input set */ +struct cmd_clear_input_set_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t cfg; + portid_t port_id; + cmdline_fixed_string_t pctype; + uint8_t pctype_id; + cmdline_fixed_string_t inset_type; + cmdline_fixed_string_t clear; + cmdline_fixed_string_t all; +}; + +static void +cmd_clear_input_set_parsed( + void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_clear_input_set_result *res = parsed_result; +#ifdef 
RTE_LIBRTE_I40E_PMD + enum rte_pmd_i40e_inset_type inset_type = INSET_NONE; + struct rte_pmd_i40e_inset inset; +#endif + int ret = -ENOTSUP; + + if (res->port_id > nb_ports) { + printf("Invalid port, range is [0, %d]\n", nb_ports - 1); + return; + } + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + +#ifdef RTE_LIBRTE_I40E_PMD + if (!strcmp(res->inset_type, "hash_inset")) + inset_type = INSET_HASH; + else if (!strcmp(res->inset_type, "fdir_inset")) + inset_type = INSET_FDIR; + else if (!strcmp(res->inset_type, "fdir_flx_inset")) + inset_type = INSET_FDIR_FLX; + + memset(&inset, 0, sizeof(inset)); + + ret = rte_pmd_i40e_inset_set(res->port_id, res->pctype_id, + &inset, inset_type); + if (ret) { + printf("Failed to clear input set.\n"); + return; + } + +#endif + + if (ret == -ENOTSUP) + printf("Function not supported\n"); +} + +cmdline_parse_token_string_t cmd_clear_input_set_port = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + port, "port"); +cmdline_parse_token_string_t cmd_clear_input_set_cfg = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + cfg, "config"); +cmdline_parse_token_num_t cmd_clear_input_set_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_clear_input_set_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_clear_input_set_pctype = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + pctype, "pctype"); +cmdline_parse_token_num_t cmd_clear_input_set_pctype_id = + TOKEN_NUM_INITIALIZER(struct cmd_clear_input_set_result, + pctype_id, UINT8); +cmdline_parse_token_string_t cmd_clear_input_set_inset_type = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + inset_type, + "hash_inset#fdir_inset#fdir_flx_inset"); +cmdline_parse_token_string_t cmd_clear_input_set_clear = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + clear, "clear"); +cmdline_parse_token_string_t cmd_clear_input_set_all = + TOKEN_STRING_INITIALIZER(struct 
cmd_clear_input_set_result, + all, "all"); + +cmdline_parse_inst_t cmd_clear_input_set = { + .f = cmd_clear_input_set_parsed, + .data = NULL, + .help_str = "port config <port_id> pctype <pctype_id> hash_inset|" + "fdir_inset|fdir_flx_inset clear all", + .tokens = { + (void *)&cmd_clear_input_set_port, + (void *)&cmd_clear_input_set_cfg, + (void *)&cmd_clear_input_set_port_id, + (void *)&cmd_clear_input_set_pctype, + (void *)&cmd_clear_input_set_pctype_id, + (void *)&cmd_clear_input_set_inset_type, + (void *)&cmd_clear_input_set_clear, + (void *)&cmd_clear_input_set_all, + NULL, + }, +}; + /* show vf stats */ /* Common result structure for show vf stats */ @@ -15554,6 +16049,7 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_set_link_down, (cmdline_parse_inst_t *)&cmd_reset, (cmdline_parse_inst_t *)&cmd_set_numbers, + (cmdline_parse_inst_t *)&cmd_set_log, (cmdline_parse_inst_t *)&cmd_set_txpkts, (cmdline_parse_inst_t *)&cmd_set_txsplit, (cmdline_parse_inst_t *)&cmd_set_fwd_list, @@ -15625,6 +16121,7 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_read_rxd_txd, (cmdline_parse_inst_t *)&cmd_stop, (cmdline_parse_inst_t *)&cmd_mac_addr, + (cmdline_parse_inst_t *)&cmd_set_fwd_eth_peer, (cmdline_parse_inst_t *)&cmd_set_qmap, (cmdline_parse_inst_t *)&cmd_set_xstats_hide_zero, (cmdline_parse_inst_t *)&cmd_operate_port, @@ -15639,7 +16136,6 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_config_rx_mode_flag, (cmdline_parse_inst_t *)&cmd_config_rss, (cmdline_parse_inst_t *)&cmd_config_rxtx_queue, - (cmdline_parse_inst_t *)&cmd_config_txqflags, (cmdline_parse_inst_t *)&cmd_config_rss_reta, (cmdline_parse_inst_t *)&cmd_showport_reta, (cmdline_parse_inst_t *)&cmd_config_burst, @@ -15672,6 +16168,7 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_add_del_l2_flow_director, (cmdline_parse_inst_t *)&cmd_add_del_mac_vlan_flow_director, (cmdline_parse_inst_t *)&cmd_add_del_tunnel_flow_director, + 
(cmdline_parse_inst_t *)&cmd_add_del_raw_flow_director, (cmdline_parse_inst_t *)&cmd_flush_flow_director, (cmdline_parse_inst_t *)&cmd_set_flow_director_ip_mask, (cmdline_parse_inst_t *)&cmd_set_flow_director_mac_vlan_mask, @@ -15685,12 +16182,16 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_set_hash_input_set, (cmdline_parse_inst_t *)&cmd_set_fdir_input_set, (cmdline_parse_inst_t *)&cmd_flow, + (cmdline_parse_inst_t *)&cmd_show_port_meter_cap, (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_srtcm, (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm, (cmdline_parse_inst_t *)&cmd_del_port_meter_profile, - (cmdline_parse_inst_t *)&cmd_set_port_meter, + (cmdline_parse_inst_t *)&cmd_create_port_meter, + (cmdline_parse_inst_t *)&cmd_enable_port_meter, + (cmdline_parse_inst_t *)&cmd_disable_port_meter, (cmdline_parse_inst_t *)&cmd_del_port_meter, (cmdline_parse_inst_t *)&cmd_set_port_meter_profile, + (cmdline_parse_inst_t *)&cmd_set_port_meter_dscp_table, (cmdline_parse_inst_t *)&cmd_set_port_meter_policer_action, (cmdline_parse_inst_t *)&cmd_set_port_meter_stats_mask, (cmdline_parse_inst_t *)&cmd_show_port_meter_stats, @@ -15737,6 +16238,8 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_ddp_del, (cmdline_parse_inst_t *)&cmd_ddp_get_list, (cmdline_parse_inst_t *)&cmd_ddp_get_info, + (cmdline_parse_inst_t *)&cmd_cfg_input_set, + (cmdline_parse_inst_t *)&cmd_clear_input_set, (cmdline_parse_inst_t *)&cmd_show_vf_stats, (cmdline_parse_inst_t *)&cmd_clear_vf_stats, (cmdline_parse_inst_t *)&cmd_ptype_mapping_get, diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c index df16d2ab..a5cf84f7 100644 --- a/app/test-pmd/cmdline_flow.c +++ b/app/test-pmd/cmdline_flow.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright 2016 6WIND S.A. - * Copyright 2016 Mellanox. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of 6WIND S.A. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox. */ #include <stddef.h> @@ -175,6 +147,9 @@ enum index { ITEM_GTP_TEID, ITEM_GTPC, ITEM_GTPU, + ITEM_GENEVE, + ITEM_GENEVE_VNI, + ITEM_GENEVE_PROTO, /* Validate/create actions. 
*/ ACTIONS, @@ -460,6 +435,7 @@ static const enum index next_item[] = { ITEM_GTP, ITEM_GTPC, ITEM_GTPU, + ITEM_GENEVE, ZERO, }; @@ -603,6 +579,13 @@ static const enum index item_gtp[] = { ZERO, }; +static const enum index item_geneve[] = { + ITEM_GENEVE_VNI, + ITEM_GENEVE_PROTO, + ITEM_NEXT, + ZERO, +}; + static const enum index next_action[] = { ACTION_END, ACTION_VOID, @@ -1470,6 +1453,26 @@ static const struct token token_list[] = { .next = NEXT(item_gtp), .call = parse_vc, }, + [ITEM_GENEVE] = { + .name = "geneve", + .help = "match GENEVE header", + .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), + .next = NEXT(item_geneve), + .call = parse_vc, + }, + [ITEM_GENEVE_VNI] = { + .name = "vni", + .help = "virtual network identifier", + .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)), + }, + [ITEM_GENEVE_PROTO] = { + .name = "protocol", + .help = "GENEVE protocol type", + .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, + protocol)), + }, /* Validate/create actions. */ [ACTIONS] = { diff --git a/app/test-pmd/cmdline_mtr.c b/app/test-pmd/cmdline_mtr.c index d8d806d7..f908fb35 100644 --- a/app/test-pmd/cmdline_mtr.c +++ b/app/test-pmd/cmdline_mtr.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #include <cmdline_parse.h> @@ -42,6 +13,9 @@ #include "testpmd.h" #include "cmdline_mtr.h" +#define PARSE_DELIMITER " \f\n\r\t\v" +#define MAX_DSCP_TABLE_ENTRIES 64 + /** Display Meter Error Message */ static void print_err_msg(struct rte_mtr_error *error) @@ -83,23 +57,272 @@ print_err_msg(struct rte_mtr_error *error) } static int +parse_uint(uint64_t *value, const char *str) +{ + char *next = NULL; + uint64_t n; + + errno = 0; + /* Parse number string */ + n = strtol(str, &next, 10); + if (errno != 0 || str == next || *next != '\0') + return -1; + + *value = n; + + return 0; +} + +static int +parse_dscp_table_entries(char *str, enum rte_mtr_color *dscp_table) +{ + char *token; + int i = 0; + + token = strtok_r(str, PARSE_DELIMITER, &str); + if (token == NULL) + return 0; + + /* Allocate memory for dscp table */ + dscp_table = (enum rte_mtr_color *)malloc(MAX_DSCP_TABLE_ENTRIES * + sizeof(enum rte_mtr_color)); + if (dscp_table == NULL) + return -1; + + while (1) { + if (strcmp(token, "G") == 0 || + strcmp(token, "g") == 0) + dscp_table[i++] = RTE_MTR_GREEN; + else if (strcmp(token, "Y") == 0 || + strcmp(token, "y") == 0) + dscp_table[i++] = RTE_MTR_YELLOW; + else if (strcmp(token, "R") == 0 || + strcmp(token, "r") == 0) + dscp_table[i++] = RTE_MTR_RED; + else { + free(dscp_table); + return -1; + } + if (i == MAX_DSCP_TABLE_ENTRIES) + break; + + token = strtok_r(str, PARSE_DELIMITER, &str); + if (token == NULL) { + free(dscp_table); + return -1; + } + } + return 0; +} + +static int +parse_meter_color_str(char *c_str, uint32_t *use_prev_meter_color, + enum rte_mtr_color *dscp_table) +{ + char *token; + uint64_t previous_mtr_color = 0; + int ret; + + /* First token: use previous meter color */ + token = strtok_r(c_str, PARSE_DELIMITER, &c_str); + if (token == NULL) + return -1; + + ret = parse_uint(&previous_mtr_color, token); + if (ret != 0) + return -1; + + /* Check if previous 
meter color to be used */ + if (previous_mtr_color) { + *use_prev_meter_color = previous_mtr_color; + return 0; + } + + /* Parse dscp table entries */ + ret = parse_dscp_table_entries(c_str, dscp_table); + if (ret != 0) + return -1; + + return 0; +} + +static int string_to_policer_action(char *s) { - if (strcmp(s, "G") == 0) + if ((strcmp(s, "G") == 0) || (strcmp(s, "g") == 0)) return MTR_POLICER_ACTION_COLOR_GREEN; - if (strcmp(s, "Y") == 0) + if ((strcmp(s, "Y") == 0) || (strcmp(s, "y") == 0)) return MTR_POLICER_ACTION_COLOR_YELLOW; - if (strcmp(s, "R") == 0) + if ((strcmp(s, "R") == 0) || (strcmp(s, "r") == 0)) return MTR_POLICER_ACTION_COLOR_RED; - if (strcmp(s, "D") == 0) + if ((strcmp(s, "D") == 0) || (strcmp(s, "d") == 0)) return MTR_POLICER_ACTION_DROP; return -1; } +static int +parse_policer_action_string(char *p_str, uint32_t action_mask, + enum rte_mtr_policer_action actions[]) +{ + char *token; + int count = __builtin_popcount(action_mask); + int g_color = 0, y_color = 0, action, i; + + for (i = 0; i < count; i++) { + token = strtok_r(p_str, PARSE_DELIMITER, &p_str); + if (token == NULL) + return -1; + + action = string_to_policer_action(token); + if (action == -1) + return -1; + + if (g_color == 0 && (action_mask & 0x1)) { + actions[RTE_MTR_GREEN] = action; + g_color = 1; + } else if (y_color == 0 && (action_mask & 0x2)) { + actions[RTE_MTR_YELLOW] = action; + y_color = 1; + } else + actions[RTE_MTR_RED] = action; + } + return 0; +} + +static int +parse_multi_token_string(char *t_str, uint16_t *port_id, + uint32_t *mtr_id, enum rte_mtr_color *dscp_table) +{ + char *token; + uint64_t val; + int ret; + + /* First token: port id */ + token = strtok_r(t_str, PARSE_DELIMITER, &t_str); + if (token == NULL) + return -1; + + ret = parse_uint(&val, token); + if (ret != 0 || val > UINT16_MAX) + return -1; + + *port_id = val; + + /* Second token: meter id */ + token = strtok_r(t_str, PARSE_DELIMITER, &t_str); + if (token == NULL) + return 0; + + ret = 
parse_uint(&val, token); + if (ret != 0 || val > UINT32_MAX) + return -1; + + *mtr_id = val; + + ret = parse_dscp_table_entries(t_str, dscp_table); + if (ret != 0) + return -1; + + return 0; +} + +/* *** Show Port Meter Capabilities *** */ +struct cmd_show_port_meter_cap_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t cap; + uint16_t port_id; +}; + +cmdline_parse_token_string_t cmd_show_port_meter_cap_show = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, show, "show"); +cmdline_parse_token_string_t cmd_show_port_meter_cap_port = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, port, "port"); +cmdline_parse_token_string_t cmd_show_port_meter_cap_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, meter, "meter"); +cmdline_parse_token_string_t cmd_show_port_meter_cap_cap = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, cap, "cap"); +cmdline_parse_token_num_t cmd_show_port_meter_cap_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_meter_cap_result, port_id, UINT16); + +static void cmd_show_port_meter_cap_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_show_port_meter_cap_result *res = parsed_result; + struct rte_mtr_capabilities cap; + struct rte_mtr_error error; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&cap, 0, sizeof(struct rte_mtr_capabilities)); + ret = rte_mtr_capabilities_get(port_id, &cap, &error); + if (ret) { + print_err_msg(&error); + return; + } + + printf("\n**** Port Meter Object Capabilities ****\n\n"); + printf("cap.n_max %" PRIu32 "\n", cap.n_max); + printf("cap.n_shared_max %" PRIu32 "\n", cap.n_shared_max); + printf("cap.identical %" PRId32 "\n", cap.identical); + printf("cap.shared_identical %" PRId32 "\n", + 
cap.shared_identical); + printf("cap.shared_n_flows_per_mtr_max %" PRIu32 "\n", + cap.shared_n_flows_per_mtr_max); + printf("cap.chaining_n_mtrs_per_flow_max %" PRIu32 "\n", + cap.chaining_n_mtrs_per_flow_max); + printf("cap.chaining_use_prev_mtr_color_supported %" PRId32 "\n", + cap.chaining_use_prev_mtr_color_supported); + printf("cap.chaining_use_prev_mtr_color_enforced %" PRId32 "\n", + cap.chaining_use_prev_mtr_color_enforced); + printf("cap.meter_srtcm_rfc2697_n_max %" PRIu32 "\n", + cap.meter_srtcm_rfc2697_n_max); + printf("cap.meter_trtcm_rfc2698_n_max %" PRIu32 "\n", + cap.meter_trtcm_rfc2698_n_max); + printf("cap.meter_trtcm_rfc4115_n_max %" PRIu32 "\n", + cap.meter_trtcm_rfc4115_n_max); + printf("cap.meter_rate_max %" PRIu64 "\n", cap.meter_rate_max); + printf("cap.color_aware_srtcm_rfc2697_supported %" PRId32 "\n", + cap.color_aware_srtcm_rfc2697_supported); + printf("cap.color_aware_trtcm_rfc2698_supported %" PRId32 "\n", + cap.color_aware_trtcm_rfc2698_supported); + printf("cap.color_aware_trtcm_rfc4115_supported %" PRId32 "\n", + cap.color_aware_trtcm_rfc4115_supported); + printf("cap.policer_action_recolor_supported %" PRId32 "\n", + cap.policer_action_recolor_supported); + printf("cap.policer_action_drop_supported %" PRId32 "\n", + cap.policer_action_drop_supported); + printf("cap.stats_mask %" PRIx64 "\n", cap.stats_mask); +} + +cmdline_parse_inst_t cmd_show_port_meter_cap = { + .f = cmd_show_port_meter_cap_parsed, + .data = NULL, + .help_str = "Show port meter cap", + .tokens = { + (void *)&cmd_show_port_meter_cap_show, + (void *)&cmd_show_port_meter_cap_port, + (void *)&cmd_show_port_meter_cap_meter, + (void *)&cmd_show_port_meter_cap_cap, + (void *)&cmd_show_port_meter_cap_port_id, + NULL, + }, +}; + /* *** Add Port Meter Profile srtcm_rfc2697 *** */ struct cmd_add_port_meter_profile_srtcm_result { cmdline_fixed_string_t add; @@ -112,7 +335,6 @@ struct cmd_add_port_meter_profile_srtcm_result { uint64_t cir; uint64_t cbs; uint64_t ebs; - uint8_t 
color_aware; }; cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_add = @@ -171,7 +393,7 @@ static void cmd_add_port_meter_profile_srtcm_parsed(void *parsed_result, /* Private shaper profile params */ memset(&mp, 0, sizeof(struct rte_mtr_meter_profile)); - mp.alg = 0; + mp.alg = RTE_MTR_SRTCM_RFC2697; mp.srtcm_rfc2697.cir = res->cir; mp.srtcm_rfc2697.cbs = res->cbs; mp.srtcm_rfc2697.ebs = res->ebs; @@ -277,7 +499,7 @@ static void cmd_add_port_meter_profile_trtcm_parsed(void *parsed_result, /* Private shaper profile params */ memset(&mp, 0, sizeof(struct rte_mtr_meter_profile)); - mp.alg = 0; + mp.alg = RTE_MTR_TRTCM_RFC2698; mp.trtcm_rfc2698.cir = res->cir; mp.trtcm_rfc2698.pir = res->pir; mp.trtcm_rfc2698.cbs = res->cbs; @@ -389,7 +611,7 @@ static void cmd_add_port_meter_profile_trtcm_rfc4115_parsed( /* Private shaper profile params */ memset(&mp, 0, sizeof(struct rte_mtr_meter_profile)); - mp.alg = 0; + mp.alg = RTE_MTR_TRTCM_RFC4115; mp.trtcm_rfc4115.cir = res->cir; mp.trtcm_rfc4115.eir = res->eir; mp.trtcm_rfc4115.cbs = res->cbs; @@ -493,65 +715,75 @@ cmdline_parse_inst_t cmd_del_port_meter_profile = { }; /* *** Create Port Meter Object *** */ -struct cmd_set_port_meter_result { - cmdline_fixed_string_t set; +struct cmd_create_port_meter_result { + cmdline_fixed_string_t create; cmdline_fixed_string_t port; cmdline_fixed_string_t meter; uint16_t port_id; uint32_t mtr_id; uint32_t profile_id; + cmdline_fixed_string_t meter_enable; cmdline_fixed_string_t g_action; cmdline_fixed_string_t y_action; cmdline_fixed_string_t r_action; uint64_t statistics_mask; uint32_t shared; + cmdline_multi_string_t meter_input_color; }; -cmdline_parse_token_string_t cmd_set_port_meter_set = +cmdline_parse_token_string_t cmd_create_port_meter_create = TOKEN_STRING_INITIALIZER( - struct cmd_set_port_meter_result, set, "set"); -cmdline_parse_token_string_t cmd_set_port_meter_port = + struct cmd_create_port_meter_result, create, "create"); +cmdline_parse_token_string_t 
cmd_create_port_meter_port = TOKEN_STRING_INITIALIZER( - struct cmd_set_port_meter_result, port, "port"); -cmdline_parse_token_string_t cmd_set_port_meter_meter = + struct cmd_create_port_meter_result, port, "port"); +cmdline_parse_token_string_t cmd_create_port_meter_meter = TOKEN_STRING_INITIALIZER( - struct cmd_set_port_meter_result, meter, "meter"); -cmdline_parse_token_num_t cmd_set_port_meter_port_id = + struct cmd_create_port_meter_result, meter, "meter"); +cmdline_parse_token_num_t cmd_create_port_meter_port_id = TOKEN_NUM_INITIALIZER( - struct cmd_set_port_meter_result, port_id, UINT16); -cmdline_parse_token_num_t cmd_set_port_meter_mtr_id = + struct cmd_create_port_meter_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_create_port_meter_mtr_id = TOKEN_NUM_INITIALIZER( - struct cmd_set_port_meter_result, mtr_id, UINT32); -cmdline_parse_token_num_t cmd_set_port_meter_profile_id = + struct cmd_create_port_meter_result, mtr_id, UINT32); +cmdline_parse_token_num_t cmd_create_port_meter_profile_id = TOKEN_NUM_INITIALIZER( - struct cmd_set_port_meter_result, profile_id, UINT32); -cmdline_parse_token_string_t cmd_set_port_meter_g_action = - TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_result, - g_action, "R#Y#G#D"); -cmdline_parse_token_string_t cmd_set_port_meter_y_action = - TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_result, - y_action, "R#Y#G#D"); -cmdline_parse_token_string_t cmd_set_port_meter_r_action = - TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_result, - r_action, "R#Y#G#D"); -cmdline_parse_token_num_t cmd_set_port_meter_statistics_mask = - TOKEN_NUM_INITIALIZER(struct cmd_set_port_meter_result, + struct cmd_create_port_meter_result, profile_id, UINT32); +cmdline_parse_token_string_t cmd_create_port_meter_meter_enable = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + meter_enable, "yes#no"); +cmdline_parse_token_string_t cmd_create_port_meter_g_action = + TOKEN_STRING_INITIALIZER(struct 
cmd_create_port_meter_result, + g_action, "R#Y#G#D#r#y#g#d"); +cmdline_parse_token_string_t cmd_create_port_meter_y_action = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + y_action, "R#Y#G#D#r#y#g#d"); +cmdline_parse_token_string_t cmd_create_port_meter_r_action = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + r_action, "R#Y#G#D#r#y#g#d"); +cmdline_parse_token_num_t cmd_create_port_meter_statistics_mask = + TOKEN_NUM_INITIALIZER(struct cmd_create_port_meter_result, statistics_mask, UINT64); -cmdline_parse_token_num_t cmd_set_port_meter_shared = - TOKEN_NUM_INITIALIZER(struct cmd_set_port_meter_result, +cmdline_parse_token_num_t cmd_create_port_meter_shared = + TOKEN_NUM_INITIALIZER(struct cmd_create_port_meter_result, shared, UINT32); +cmdline_parse_token_string_t cmd_create_port_meter_input_color = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + meter_input_color, TOKEN_STRING_MULTI); -static void cmd_set_port_meter_parsed(void *parsed_result, +static void cmd_create_port_meter_parsed(void *parsed_result, __attribute__((unused)) struct cmdline *cl, __attribute__((unused)) void *data) { - struct cmd_set_port_meter_result *res = parsed_result; + struct cmd_create_port_meter_result *res = parsed_result; struct rte_mtr_error error; struct rte_mtr_params params; uint32_t mtr_id = res->mtr_id; uint32_t shared = res->shared; + uint32_t use_prev_meter_color = 0; uint16_t port_id = res->port_id; - + enum rte_mtr_color *dscp_table = NULL; + char *c_str = res->meter_input_color; int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) @@ -560,9 +792,22 @@ static void cmd_set_port_meter_parsed(void *parsed_result, /* Meter params */ memset(¶ms, 0, sizeof(struct rte_mtr_params)); params.meter_profile_id = res->profile_id; - params.use_prev_mtr_color = 1; - params.dscp_table = NULL; - params.meter_enable = 1; + + /* Parse meter input color string params */ + ret = parse_meter_color_str(c_str, &use_prev_meter_color, 
dscp_table); + if (ret) { + printf(" Meter input color params string parse error\n"); + return; + } + + params.use_prev_mtr_color = use_prev_meter_color; + params.dscp_table = dscp_table; + + if (strcmp(res->meter_enable, "yes") == 0) + params.meter_enable = 1; + else + params.meter_enable = 0; + params.action[RTE_MTR_GREEN] = string_to_policer_action(res->g_action); params.action[RTE_MTR_YELLOW] = @@ -573,27 +818,152 @@ static void cmd_set_port_meter_parsed(void *parsed_result, ret = rte_mtr_create(port_id, mtr_id, ¶ms, shared, &error); if (ret != 0) { + free(dscp_table); + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_create_port_meter = { + .f = cmd_create_port_meter_parsed, + .data = NULL, + .help_str = "Create port meter", + .tokens = { + (void *)&cmd_create_port_meter_create, + (void *)&cmd_create_port_meter_port, + (void *)&cmd_create_port_meter_meter, + (void *)&cmd_create_port_meter_port_id, + (void *)&cmd_create_port_meter_mtr_id, + (void *)&cmd_create_port_meter_profile_id, + (void *)&cmd_create_port_meter_meter_enable, + (void *)&cmd_create_port_meter_g_action, + (void *)&cmd_create_port_meter_y_action, + (void *)&cmd_create_port_meter_r_action, + (void *)&cmd_create_port_meter_statistics_mask, + (void *)&cmd_create_port_meter_shared, + (void *)&cmd_create_port_meter_input_color, + NULL, + }, +}; + +/* *** Enable Meter of MTR Object *** */ +struct cmd_enable_port_meter_result { + cmdline_fixed_string_t enable; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + uint16_t port_id; + uint32_t mtr_id; +}; + +cmdline_parse_token_string_t cmd_enable_port_meter_enable = + TOKEN_STRING_INITIALIZER( + struct cmd_enable_port_meter_result, enable, "enable"); +cmdline_parse_token_string_t cmd_enable_port_meter_port = + TOKEN_STRING_INITIALIZER( + struct cmd_enable_port_meter_result, port, "port"); +cmdline_parse_token_string_t cmd_enable_port_meter_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_enable_port_meter_result, meter, 
"meter"); +cmdline_parse_token_num_t cmd_enable_port_meter_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_enable_port_meter_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_enable_port_meter_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_enable_port_meter_result, mtr_id, UINT32); + +static void cmd_enable_port_meter_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_enable_port_meter_result *res = parsed_result; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint16_t port_id = res->port_id; + + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Enable Meter */ + ret = rte_mtr_meter_enable(port_id, mtr_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_enable_port_meter = { + .f = cmd_enable_port_meter_parsed, + .data = NULL, + .help_str = "Enable port meter", + .tokens = { + (void *)&cmd_enable_port_meter_enable, + (void *)&cmd_enable_port_meter_port, + (void *)&cmd_enable_port_meter_meter, + (void *)&cmd_enable_port_meter_port_id, + (void *)&cmd_enable_port_meter_mtr_id, + NULL, + }, +}; + +/* *** Disable Meter of MTR Object *** */ +struct cmd_disable_port_meter_result { + cmdline_fixed_string_t disable; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + uint16_t port_id; + uint32_t mtr_id; +}; + +cmdline_parse_token_string_t cmd_disable_port_meter_disable = + TOKEN_STRING_INITIALIZER( + struct cmd_disable_port_meter_result, disable, "disable"); +cmdline_parse_token_string_t cmd_disable_port_meter_port = + TOKEN_STRING_INITIALIZER( + struct cmd_disable_port_meter_result, port, "port"); +cmdline_parse_token_string_t cmd_disable_port_meter_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_disable_port_meter_result, meter, "meter"); +cmdline_parse_token_num_t cmd_disable_port_meter_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_disable_port_meter_result, port_id, UINT16); 
+cmdline_parse_token_num_t cmd_disable_port_meter_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_disable_port_meter_result, mtr_id, UINT32); + +static void cmd_disable_port_meter_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_disable_port_meter_result *res = parsed_result; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint16_t port_id = res->port_id; + + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Disable Meter */ + ret = rte_mtr_meter_disable(port_id, mtr_id, &error); + if (ret != 0) { print_err_msg(&error); return; } } -cmdline_parse_inst_t cmd_set_port_meter = { - .f = cmd_set_port_meter_parsed, +cmdline_parse_inst_t cmd_disable_port_meter = { + .f = cmd_disable_port_meter_parsed, .data = NULL, - .help_str = "Set port meter", + .help_str = "Disable port meter", .tokens = { - (void *)&cmd_set_port_meter_set, - (void *)&cmd_set_port_meter_port, - (void *)&cmd_set_port_meter_meter, - (void *)&cmd_set_port_meter_port_id, - (void *)&cmd_set_port_meter_mtr_id, - (void *)&cmd_set_port_meter_profile_id, - (void *)&cmd_set_port_meter_g_action, - (void *)&cmd_set_port_meter_y_action, - (void *)&cmd_set_port_meter_r_action, - (void *)&cmd_set_port_meter_statistics_mask, - (void *)&cmd_set_port_meter_shared, + (void *)&cmd_disable_port_meter_disable, + (void *)&cmd_disable_port_meter_port, + (void *)&cmd_disable_port_meter_meter, + (void *)&cmd_disable_port_meter_port_id, + (void *)&cmd_disable_port_meter_mtr_id, NULL, }, }; @@ -732,6 +1102,78 @@ cmdline_parse_inst_t cmd_set_port_meter_profile = { }, }; +/* *** Set Port Meter DSCP Table *** */ +struct cmd_set_port_meter_dscp_table_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t dscp_table; + cmdline_multi_string_t token_string; +}; + +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_set = + 
TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, set, "set"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, port, "port"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, meter, "meter"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_dscp_table = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, + dscp_table, "dscp table"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_token_string = + TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_dscp_table_result, + token_string, TOKEN_STRING_MULTI); + +static void cmd_set_port_meter_dscp_table_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_port_meter_dscp_table_result *res = parsed_result; + struct rte_mtr_error error; + enum rte_mtr_color *dscp_table = NULL; + char *t_str = res->token_string; + uint32_t mtr_id = 0; + uint16_t port_id; + int ret; + + /* Parse string */ + ret = parse_multi_token_string(t_str, &port_id, &mtr_id, dscp_table); + if (ret) { + printf(" Multi token string parse error\n"); + return; + } + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Update Meter DSCP Table*/ + ret = rte_mtr_meter_dscp_table_update(port_id, mtr_id, + dscp_table, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } + free(dscp_table); +} + +cmdline_parse_inst_t cmd_set_port_meter_dscp_table = { + .f = cmd_set_port_meter_dscp_table_parsed, + .data = NULL, + .help_str = "Update port meter dscp table", + .tokens = { + (void *)&cmd_set_port_meter_dscp_table_set, + (void *)&cmd_set_port_meter_dscp_table_port, + (void *)&cmd_set_port_meter_dscp_table_meter, + (void *)&cmd_set_port_meter_dscp_table_dscp_table, + (void 
*)&cmd_set_port_meter_dscp_table_token_string, + NULL, + }, +}; + /* *** Set Port Meter Policer Action *** */ struct cmd_set_port_meter_policer_action_result { cmdline_fixed_string_t set; @@ -741,8 +1183,8 @@ struct cmd_set_port_meter_policer_action_result { cmdline_fixed_string_t action; uint16_t port_id; uint32_t mtr_id; - cmdline_fixed_string_t color; - cmdline_fixed_string_t policer_action; + uint32_t action_mask; + cmdline_multi_string_t policer_action; }; cmdline_parse_token_string_t cmd_set_port_meter_policer_action_set = @@ -771,56 +1213,60 @@ cmdline_parse_token_num_t cmd_set_port_meter_policer_action_mtr_id = TOKEN_NUM_INITIALIZER( struct cmd_set_port_meter_policer_action_result, mtr_id, UINT32); -cmdline_parse_token_string_t cmd_set_port_meter_policer_action_color = - TOKEN_STRING_INITIALIZER( - struct cmd_set_port_meter_policer_action_result, color, - "G#Y#R"); +cmdline_parse_token_num_t cmd_set_port_meter_policer_action_action_mask = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, action_mask, + UINT32); cmdline_parse_token_string_t cmd_set_port_meter_policer_action_policer_action = TOKEN_STRING_INITIALIZER( struct cmd_set_port_meter_policer_action_result, - policer_action, "G#Y#R#D"); + policer_action, TOKEN_STRING_MULTI); static void cmd_set_port_meter_policer_action_parsed(void *parsed_result, __attribute__((unused)) struct cmdline *cl, __attribute__((unused)) void *data) { struct cmd_set_port_meter_policer_action_result *res = parsed_result; - enum rte_mtr_color color; - enum rte_mtr_policer_action action[RTE_MTR_COLORS]; + enum rte_mtr_policer_action *actions; struct rte_mtr_error error; uint32_t mtr_id = res->mtr_id; + uint32_t action_mask = res->action_mask; uint16_t port_id = res->port_id; - char *c = res->color; - char *a = res->policer_action; + char *p_str = res->policer_action; int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; - /* Color */ - if (strcmp(c, "G") == 0) - color = RTE_MTR_GREEN; - else 
if (strcmp(c, "Y") == 0) - color = RTE_MTR_YELLOW; - else - color = RTE_MTR_RED; - - /* Action */ - if (strcmp(a, "G") == 0) - action[color] = MTR_POLICER_ACTION_COLOR_GREEN; - else if (strcmp(a, "Y") == 0) - action[color] = MTR_POLICER_ACTION_COLOR_YELLOW; - else if (strcmp(a, "R") == 0) - action[color] = MTR_POLICER_ACTION_COLOR_RED; - else - action[color] = MTR_POLICER_ACTION_DROP; + /* Check: action mask */ + if (action_mask == 0 || (action_mask & (~0x7UL))) { + printf(" Policer action mask not correct (error)\n"); + return; + } + + /* Allocate memory for policer actions */ + actions = (enum rte_mtr_policer_action *)malloc(RTE_MTR_COLORS * + sizeof(enum rte_mtr_policer_action)); + if (actions == NULL) { + printf("Memory for policer actions not allocated (error)\n"); + return; + } + /* Parse policer action string */ + ret = parse_policer_action_string(p_str, action_mask, actions); + if (ret) { + printf(" Policer action string parse error\n"); + free(actions); + return; + } ret = rte_mtr_policer_actions_update(port_id, mtr_id, - 1 << color, action, &error); + action_mask, actions, &error); if (ret != 0) { print_err_msg(&error); return; } + + free(actions); } cmdline_parse_inst_t cmd_set_port_meter_policer_action = { @@ -835,7 +1281,7 @@ cmdline_parse_inst_t cmd_set_port_meter_policer_action = { (void *)&cmd_set_port_meter_policer_action_action, (void *)&cmd_set_port_meter_policer_action_port_id, (void *)&cmd_set_port_meter_policer_action_mtr_id, - (void *)&cmd_set_port_meter_policer_action_color, + (void *)&cmd_set_port_meter_policer_action_action_mask, (void *)&cmd_set_port_meter_policer_action_policer_action, NULL, }, @@ -925,7 +1371,7 @@ struct cmd_show_port_meter_stats_result { cmdline_fixed_string_t stats; uint16_t port_id; uint32_t mtr_id; - uint32_t clear; + cmdline_fixed_string_t clear; }; cmdline_parse_token_string_t cmd_show_port_meter_stats_show = @@ -946,9 +1392,9 @@ cmdline_parse_token_num_t cmd_show_port_meter_stats_port_id = 
cmdline_parse_token_num_t cmd_show_port_meter_stats_mtr_id = TOKEN_NUM_INITIALIZER( struct cmd_show_port_meter_stats_result, mtr_id, UINT32); -cmdline_parse_token_num_t cmd_show_port_meter_stats_clear = - TOKEN_NUM_INITIALIZER( - struct cmd_show_port_meter_stats_result, clear, UINT32); +cmdline_parse_token_string_t cmd_show_port_meter_stats_clear = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_stats_result, clear, "yes#no"); static void cmd_show_port_meter_stats_parsed(void *parsed_result, __attribute__((unused)) struct cmdline *cl, @@ -959,13 +1405,16 @@ static void cmd_show_port_meter_stats_parsed(void *parsed_result, uint64_t stats_mask = 0; struct rte_mtr_error error; uint32_t mtr_id = res->mtr_id; - uint32_t clear = res->clear; + uint32_t clear = 0; uint16_t port_id = res->port_id; int ret; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; + if (strcmp(res->clear, "yes") == 0) + clear = 1; + memset(&stats, 0, sizeof(struct rte_mtr_stats)); ret = rte_mtr_stats_read(port_id, mtr_id, &stats, &stats_mask, clear, &error); diff --git a/app/test-pmd/cmdline_mtr.h b/app/test-pmd/cmdline_mtr.h index 5d599efc..e69d6da0 100644 --- a/app/test-pmd/cmdline_mtr.h +++ b/app/test-pmd/cmdline_mtr.h @@ -1,47 +1,22 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #ifndef _CMDLINE_MTR_H_ #define _CMDLINE_MTR_H_ /* Traffic Metering and Policing */ +extern cmdline_parse_inst_t cmd_show_port_meter_cap; extern cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm; extern cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm; extern cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115; extern cmdline_parse_inst_t cmd_del_port_meter_profile; -extern cmdline_parse_inst_t cmd_set_port_meter; +extern cmdline_parse_inst_t cmd_create_port_meter; +extern cmdline_parse_inst_t cmd_enable_port_meter; +extern cmdline_parse_inst_t cmd_disable_port_meter; extern cmdline_parse_inst_t cmd_del_port_meter; extern cmdline_parse_inst_t cmd_set_port_meter_profile; +extern cmdline_parse_inst_t cmd_set_port_meter_dscp_table; extern cmdline_parse_inst_t cmd_set_port_meter_policer_action; extern cmdline_parse_inst_t cmd_set_port_meter_stats_mask; extern cmdline_parse_inst_t cmd_show_port_meter_stats; diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c index 803fae44..35cad543 100644 --- a/app/test-pmd/cmdline_tm.c +++ b/app/test-pmd/cmdline_tm.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #include <cmdline_parse.h> diff --git a/app/test-pmd/cmdline_tm.h b/app/test-pmd/cmdline_tm.h index 9d5fdf0a..ba303607 100644 --- a/app/test-pmd/cmdline_tm.h +++ b/app/test-pmd/cmdline_tm.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #ifndef _CMDLINE_TM_H_ diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index cd2ac116..4bb255c6 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -1,35 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * Copyright 2013-2014 6WIND S.A. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright 2013-2014 6WIND S.A. */ #include <stdarg.h> @@ -78,6 +49,7 @@ #include <rte_pmd_bnxt.h> #endif #include <rte_gro.h> +#include <cmdline_parse_etheraddr.h> #include "testpmd.h" @@ -417,7 +389,6 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id) printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); - printf("\nTX flags: %#x", qinfo.conf.txq_flags); printf("\nTX deferred start: %s", (qinfo.conf.tx_deferred_start != 0) ? 
"on" : "off"); printf("\nNumber of TXDs: %hu", qinfo.nb_desc); @@ -522,6 +493,16 @@ port_infos_display(portid_t port_id) } } + printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); + printf("Maximum configurable length of RX packet: %u\n", + dev_info.max_rx_pktlen); + if (dev_info.max_vfs) + printf("Maximum number of VFs: %u\n", dev_info.max_vfs); + if (dev_info.max_vmdq_pools) + printf("Maximum number of VMDq pools: %u\n", + dev_info.max_vmdq_pools); + + printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); printf("Max possible number of RXDs per queue: %hu\n", dev_info.rx_desc_lim.nb_max); @@ -529,6 +510,7 @@ port_infos_display(portid_t port_id) dev_info.rx_desc_lim.nb_min); printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); + printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); printf("Max possible number of TXDs per queue: %hu\n", dev_info.tx_desc_lim.nb_max); @@ -540,14 +522,12 @@ port_infos_display(portid_t port_id) void port_offload_cap_display(portid_t port_id) { - struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; static const char *info_border = "************"; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; - dev = &rte_eth_devices[port_id]; rte_eth_dev_info_get(port_id, &dev_info); printf("\n%s Port %d supported offload features: %s\n", @@ -555,7 +535,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { printf("VLAN stripped: "); - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) printf("on\n"); else printf("off\n"); @@ -563,7 +544,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { printf("Double VLANs stripped: "); - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if 
(ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) printf("on\n"); else printf("off\n"); @@ -571,7 +553,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { printf("RX IPv4 checksum: "); - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_IPV4_CKSUM) printf("on\n"); else printf("off\n"); @@ -579,7 +562,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { printf("RX UDP checksum: "); - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_UDP_CKSUM) printf("on\n"); else printf("off\n"); @@ -587,18 +571,26 @@ port_offload_cap_display(portid_t port_id) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { printf("RX TCP checksum: "); - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TCP_CKSUM) printf("on\n"); else printf("off\n"); } - if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) - printf("RX Outer IPv4 checksum: on"); + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { + printf("RX Outer IPv4 checksum: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) + printf("on\n"); + else + printf("off\n"); + } if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { printf("Large receive offload: "); - if (dev->data->dev_conf.rxmode.enable_lro) + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TCP_LRO) printf("on\n"); else printf("off\n"); @@ -606,8 +598,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { printf("VLAN insert: "); - if (ports[port_id].tx_ol_flags & - TESTPMD_TX_OFFLOAD_INSERT_VLAN) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_VLAN_INSERT) printf("on\n"); else printf("off\n"); @@ -615,7 
+607,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { printf("HW timestamp: "); - if (dev->data->dev_conf.rxmode.hw_timestamp) + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TIMESTAMP) printf("on\n"); else printf("off\n"); @@ -623,8 +616,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { printf("Double VLANs insert: "); - if (ports[port_id].tx_ol_flags & - TESTPMD_TX_OFFLOAD_INSERT_QINQ) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_QINQ_INSERT) printf("on\n"); else printf("off\n"); @@ -632,7 +625,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { printf("TX IPv4 checksum: "); - if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_IPV4_CKSUM) printf("on\n"); else printf("off\n"); @@ -640,7 +634,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { printf("TX UDP checksum: "); - if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_UDP_CKSUM) printf("on\n"); else printf("off\n"); @@ -648,7 +643,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { printf("TX TCP checksum: "); - if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_TCP_CKSUM) printf("on\n"); else printf("off\n"); @@ -656,7 +652,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { printf("TX SCTP checksum: "); - if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SCTP_CKSUM) printf("on\n"); else printf("off\n"); @@ -664,8 +661,8 @@ 
port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { printf("TX Outer IPv4 checksum: "); - if (ports[port_id].tx_ol_flags & - TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) printf("on\n"); else printf("off\n"); @@ -673,7 +670,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { printf("TX TCP segmentation: "); - if (ports[port_id].tso_segsz != 0) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_TCP_TSO) printf("on\n"); else printf("off\n"); @@ -681,7 +679,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { printf("TX UDP segmentation: "); - if (ports[port_id].tso_segsz != 0) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_UDP_TSO) printf("on\n"); else printf("off\n"); @@ -689,7 +688,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { printf("TSO for VXLAN tunnel packet: "); - if (ports[port_id].tunnel_tso_segsz) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_VXLAN_TNL_TSO) printf("on\n"); else printf("off\n"); @@ -697,7 +697,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { printf("TSO for GRE tunnel packet: "); - if (ports[port_id].tunnel_tso_segsz) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_GRE_TNL_TSO) printf("on\n"); else printf("off\n"); @@ -705,7 +706,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { printf("TSO for IPIP tunnel packet: "); - if (ports[port_id].tunnel_tso_segsz) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_IPIP_TNL_TSO) printf("on\n"); else printf("off\n"); @@ -713,7 +715,8 @@ port_offload_cap_display(portid_t port_id) if (dev_info.tx_offload_capa & 
DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { printf("TSO for GENEVE tunnel packet: "); - if (ports[port_id].tunnel_tso_segsz) + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_GENEVE_TNL_TSO) printf("on\n"); else printf("off\n"); @@ -724,11 +727,14 @@ port_offload_cap_display(portid_t port_id) int port_id_is_invalid(portid_t port_id, enum print_warning warning) { + uint16_t pid; + if (port_id == (portid_t)RTE_PORT_ALL) return 0; - if (rte_eth_dev_is_valid_port(port_id)) - return 0; + RTE_ETH_FOREACH_DEV(pid) + if (port_id == pid) + return 0; if (warning == ENABLED_WARN) printf("Invalid port %d\n", port_id); @@ -973,6 +979,7 @@ static const struct { MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), + MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), }; /** Compute storage space needed by item specification. */ @@ -1026,6 +1033,7 @@ static const struct { MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */ MK_FLOW_ACTION(PF, 0), MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)), + MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)), }; /** Compute storage space needed by action configuration. */ @@ -1655,33 +1663,46 @@ fwd_lcores_config_display(void) void rxtx_config_display(void) { - printf(" %s packet forwarding%s - CRC stripping %s - " - "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, + portid_t pid; + + printf(" %s packet forwarding%s packets/burst=%d\n", + cur_fwd_eng->fwd_mode_name, retry_enabled == 0 ? "" : " with retry", - rx_mode.hw_strip_crc ? 
"enabled" : "disabled", nb_pkt_per_burst); if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) printf(" packet len=%u - nb packet segments=%d\n", (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); - struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf; - struct rte_eth_txconf *tx_conf = &ports[0].tx_conf; - printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", nb_fwd_lcores, nb_fwd_ports); - printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", - nb_rxq, nb_rxd, rx_conf->rx_free_thresh); - printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", - rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh, - rx_conf->rx_thresh.wthresh); - printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", - nb_txq, nb_txd, tx_conf->tx_free_thresh); - printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", - tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, - tx_conf->tx_thresh.wthresh); - printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", - tx_conf->tx_rs_thresh, tx_conf->txq_flags); + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf; + struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf; + + printf(" port %d:\n", (unsigned int)pid); + printf(" CRC stripping %s\n", + (ports[pid].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP) ? 
+ "enabled" : "disabled"); + printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", + nb_rxq, nb_rxd, rx_conf->rx_free_thresh); + printf(" RX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", + rx_conf->rx_thresh.pthresh, + rx_conf->rx_thresh.hthresh, + rx_conf->rx_thresh.wthresh); + printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", + nb_txq, nb_txd, tx_conf->tx_free_thresh); + printf(" TX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", + tx_conf->tx_thresh.pthresh, + tx_conf->tx_thresh.hthresh, + tx_conf->tx_thresh.wthresh); + printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n", + tx_conf->tx_rs_thresh, tx_conf->offloads); + } } void @@ -1861,23 +1882,40 @@ setup_fwd_config_of_each_lcore(struct fwd_config *cfg) } } +static portid_t +fwd_topology_tx_port_get(portid_t rxp) +{ + static int warning_once = 1; + + RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); + + switch (port_topology) { + default: + case PORT_TOPOLOGY_PAIRED: + if ((rxp & 0x1) == 0) { + if (rxp + 1 < cur_fwd_config.nb_fwd_ports) + return rxp + 1; + if (warning_once) { + printf("\nWarning! port-topology=paired" + " and odd forward ports number," + " the last port will pair with" + " itself.\n\n"); + warning_once = 0; + } + return rxp; + } + return rxp - 1; + case PORT_TOPOLOGY_CHAINED: + return (rxp + 1) % cur_fwd_config.nb_fwd_ports; + case PORT_TOPOLOGY_LOOP: + return rxp; + } +} + static void simple_fwd_config_setup(void) { portid_t i; - portid_t j; - portid_t inc = 2; - - if (port_topology == PORT_TOPOLOGY_CHAINED || - port_topology == PORT_TOPOLOGY_LOOP) { - inc = 1; - } else if (nb_fwd_ports % 2) { - printf("\nWarning! Cannot handle an odd number of ports " - "with the current port topology. 
Configuration " - "must be changed to have an even number of ports, " - "or relaunch application with " - "--port-topology=chained\n\n"); - } cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; cur_fwd_config.nb_fwd_streams = @@ -1896,26 +1934,14 @@ simple_fwd_config_setup(void) (lcoreid_t) cur_fwd_config.nb_fwd_ports; setup_fwd_config_of_each_lcore(&cur_fwd_config); - for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { - if (port_topology != PORT_TOPOLOGY_LOOP) - j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); - else - j = i; + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { fwd_streams[i]->rx_port = fwd_ports_ids[i]; fwd_streams[i]->rx_queue = 0; - fwd_streams[i]->tx_port = fwd_ports_ids[j]; + fwd_streams[i]->tx_port = + fwd_ports_ids[fwd_topology_tx_port_get(i)]; fwd_streams[i]->tx_queue = 0; fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; fwd_streams[i]->retry_enabled = retry_enabled; - - if (port_topology == PORT_TOPOLOGY_PAIRED) { - fwd_streams[j]->rx_port = fwd_ports_ids[j]; - fwd_streams[j]->rx_queue = 0; - fwd_streams[j]->tx_port = fwd_ports_ids[i]; - fwd_streams[j]->tx_queue = 0; - fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port; - fwd_streams[j]->retry_enabled = retry_enabled; - } } } @@ -1923,11 +1949,6 @@ simple_fwd_config_setup(void) * For the RSS forwarding test all streams distributed over lcores. Each stream * being composed of a RX queue to poll on a RX port for input messages, * associated with a TX queue of a TX port where to send forwarded packets. 
- * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" - * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two - * following rules: - * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd - * - TxQl = RxQj */ static void rss_fwd_config_setup(void) @@ -1959,18 +1980,7 @@ rss_fwd_config_setup(void) struct fwd_stream *fs; fs = fwd_streams[sm_id]; - - if ((rxp & 0x1) == 0) - txp = (portid_t) (rxp + 1); - else - txp = (portid_t) (rxp - 1); - /* - * if we are in loopback, simply send stuff out through the - * ingress port - */ - if (port_topology == PORT_TOPOLOGY_LOOP) - txp = rxp; - + txp = fwd_topology_tx_port_get(rxp); fs->rx_port = fwd_ports_ids[rxp]; fs->rx_queue = rxq; fs->tx_port = fwd_ports_ids[txp]; @@ -1985,11 +1995,7 @@ rss_fwd_config_setup(void) * Restart from RX queue 0 on next RX port */ rxq = 0; - if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) - rxp = (portid_t) - (rxp + ((nb_ports >> 1) / nb_fwd_ports)); - else - rxp = (portid_t) (rxp + 1); + rxp++; } } @@ -2194,6 +2200,24 @@ pkt_fwd_config_display(struct fwd_config *cfg) printf("\n"); } +void +set_fwd_eth_peer(portid_t port_id, char *peer_addr) +{ + uint8_t c, new_peer_addr[6]; + if (!rte_eth_dev_is_valid_port(port_id)) { + printf("Error: Invalid port number %i\n", port_id); + return; + } + if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr, + sizeof(new_peer_addr)) < 0) { + printf("Error: Invalid ethernet address: %s\n", peer_addr); + return; + } + for (c = 0; c < 6; c++) + peer_eth_addrs[port_id].addr_bytes[c] = + new_peer_addr[c]; +} + int set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) { @@ -2631,21 +2655,26 @@ vlan_extend_set(portid_t port_id, int on) { int diag; int vlan_offload; + uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; vlan_offload = rte_eth_dev_get_vlan_offload(port_id); - if (on) + if (on) { vlan_offload |= 
ETH_VLAN_EXTEND_OFFLOAD; - else + port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + } else { vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; + port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; + } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); if (diag < 0) printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " "diag=%d\n", port_id, on, diag); + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } void @@ -2653,21 +2682,26 @@ rx_vlan_strip_set(portid_t port_id, int on) { int diag; int vlan_offload; + uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; vlan_offload = rte_eth_dev_get_vlan_offload(port_id); - if (on) + if (on) { vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; - else + port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; + port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); if (diag < 0) printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " "diag=%d\n", port_id, on, diag); + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } void @@ -2689,21 +2723,26 @@ rx_vlan_filter_set(portid_t port_id, int on) { int diag; int vlan_offload; + uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; if (port_id_is_invalid(port_id, ENABLED_WARN)) return; vlan_offload = rte_eth_dev_get_vlan_offload(port_id); - if (on) + if (on) { vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; - else + port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + } else { vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; + port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; + } diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); if (diag < 0) printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " "diag=%d\n", port_id, on, diag); + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; } int @@ -2758,6 +2797,8 @@ void tx_vlan_set(portid_t port_id, uint16_t vlan_id) { int vlan_offload; + struct 
rte_eth_dev_info dev_info; + if (port_id_is_invalid(port_id, ENABLED_WARN)) return; if (vlan_id_is_invalid(vlan_id)) @@ -2768,9 +2809,15 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id) printf("Error, as QinQ has been enabled.\n"); return; } + rte_eth_dev_info_get(port_id, &dev_info); + if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { + printf("Error: vlan insert is not supported by port %d\n", + port_id); + return; + } tx_vlan_reset(port_id); - ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN; + ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; ports[port_id].tx_vlan_id = vlan_id; } @@ -2778,6 +2825,8 @@ void tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) { int vlan_offload; + struct rte_eth_dev_info dev_info; + if (port_id_is_invalid(port_id, ENABLED_WARN)) return; if (vlan_id_is_invalid(vlan_id)) @@ -2790,9 +2839,15 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) printf("Error, as QinQ hasn't been enabled.\n"); return; } + rte_eth_dev_info_get(port_id, &dev_info); + if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { + printf("Error: qinq insert not supported by port %d\n", + port_id); + return; + } tx_vlan_reset(port_id); - ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ; + ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT; ports[port_id].tx_vlan_id = vlan_id; ports[port_id].tx_vlan_id_outer = vlan_id_outer; } @@ -2802,8 +2857,9 @@ tx_vlan_reset(portid_t port_id) { if (port_id_is_invalid(port_id, ENABLED_WARN)) return; - ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN | - TESTPMD_TX_OFFLOAD_INSERT_QINQ); + ports[port_id].dev_conf.txmode.offloads &= + ~(DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT); ports[port_id].tx_vlan_id = 0; ports[port_id].tx_vlan_id_outer = 0; } @@ -3401,7 +3457,7 @@ port_dcb_info_display(portid_t port_id) } uint8_t * -open_ddp_package_file(const char 
*file_path, uint32_t *size) +open_file(const char *file_path, uint32_t *size) { int fd = open(file_path, O_RDONLY); off_t pkg_size; @@ -3441,7 +3497,7 @@ open_ddp_package_file(const char *file_path, uint32_t *size) if (ret < 0) { close(fd); printf("%s: File read operation failed\n", __func__); - close_ddp_package_file(buf); + close_file(buf); return NULL; } @@ -3454,7 +3510,7 @@ open_ddp_package_file(const char *file_path, uint32_t *size) } int -save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size) +save_file(const char *file_path, uint8_t *buf, uint32_t size) { FILE *fh = fopen(file_path, "wb"); @@ -3475,7 +3531,7 @@ save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size) } int -close_ddp_package_file(uint8_t *buf) +close_file(uint8_t *buf) { if (buf) { free((void *)buf); diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c index aa29f5fc..5f5ab64a 100644 --- a/app/test-pmd/csumonly.c +++ b/app/test-pmd/csumonly.c @@ -1,35 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * Copyright 2014 6WIND S.A. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation. + * Copyright 2014 6WIND S.A. */ #include <stdarg.h> @@ -316,7 +287,7 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info) * depending on the testpmd command line configuration */ static uint64_t process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, - uint16_t testpmd_ol_flags) + uint64_t tx_offloads) { struct ipv4_hdr *ipv4_hdr = l3_hdr; struct udp_hdr *udp_hdr; @@ -347,7 +318,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, if (info->l4_proto == IPPROTO_TCP && tso_segsz) { ol_flags |= PKT_TX_IP_CKSUM; } else { - if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ol_flags |= PKT_TX_IP_CKSUM; else ipv4_hdr->hdr_checksum = @@ -363,7 +334,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, /* do not recalculate udp cksum if it was 0 */ if (udp_hdr->dgram_cksum != 0) { udp_hdr->dgram_cksum = 0; - if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) + if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ol_flags |= PKT_TX_UDP_CKSUM; else { udp_hdr->dgram_cksum 
= @@ -376,7 +347,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, tcp_hdr->cksum = 0; if (tso_segsz) ol_flags |= PKT_TX_TCP_SEG; - else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) + else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ol_flags |= PKT_TX_TCP_CKSUM; else { tcp_hdr->cksum = @@ -390,7 +361,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, sctp_hdr->cksum = 0; /* sctp payload must be a multiple of 4 to be * offloaded */ - if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) && + if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) && ((ipv4_hdr->total_length & 0x3) == 0)) { ol_flags |= PKT_TX_SCTP_CKSUM; } else { @@ -405,7 +376,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, /* Calculate the checksum of outer header */ static uint64_t process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, - uint16_t testpmd_ol_flags, int tso_enabled) + uint64_t tx_offloads, int tso_enabled) { struct ipv4_hdr *ipv4_hdr = outer_l3_hdr; struct ipv6_hdr *ipv6_hdr = outer_l3_hdr; @@ -416,7 +387,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, ipv4_hdr->hdr_checksum = 0; ol_flags |= PKT_TX_OUTER_IPV4; - if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) + if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ol_flags |= PKT_TX_OUTER_IP_CKSUM; else ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); @@ -563,7 +534,7 @@ pkt_copy_split(const struct rte_mbuf *pkt) while (i != 0) { p = rte_pktmbuf_alloc(mp); if (p == NULL) { - RTE_LOG(ERR, USER1, + TESTPMD_LOG(ERR, "failed to allocate %u-th of %u mbuf " "from mempool: %s\n", nb_seg - i, nb_seg, mp->name); @@ -572,7 +543,7 @@ pkt_copy_split(const struct rte_mbuf *pkt) md[--i] = p; if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) { - RTE_LOG(ERR, USER1, "mempool %s, %u-th segment: " + TESTPMD_LOG(ERR, "mempool %s, %u-th segment: " "expected seglen: %u, " "actual mbuf tailroom: %u\n", 
mp->name, i, seglen[i], @@ -585,7 +556,7 @@ pkt_copy_split(const struct rte_mbuf *pkt) if (i == 0) { rc = mbuf_copy_split(pkt, md, seglen, nb_seg); if (rc < 0) - RTE_LOG(ERR, USER1, + TESTPMD_LOG(ERR, "mbuf_copy_split for %p(len=%u, nb_seg=%u) " "into %u segments failed with error code: %d\n", pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc); @@ -646,7 +617,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) uint16_t nb_prep; uint16_t i; uint64_t rx_ol_flags, tx_ol_flags; - uint16_t testpmd_ol_flags; + uint64_t tx_offloads; uint32_t retry; uint32_t rx_bad_ip_csum; uint32_t rx_bad_l4_csum; @@ -678,7 +649,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) gro_enable = gro_ports[fs->rx_port].enable; txp = &ports[fs->tx_port]; - testpmd_ol_flags = txp->tx_ol_flags; + tx_offloads = txp->dev_conf.txmode.offloads; memset(&info, 0, sizeof(info)); info.tso_segsz = txp->tso_segsz; info.tunnel_tso_segsz = txp->tunnel_tso_segsz; @@ -714,7 +685,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) l3_hdr = (char *)eth_hdr + info.l2_len; /* check if it's a supported tunnel */ - if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) { + if (txp->parse_tunnel) { if (info.l4_proto == IPPROTO_UDP) { struct udp_hdr *udp_hdr; @@ -754,14 +725,14 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) /* process checksums of inner headers first */ tx_ol_flags |= process_inner_cksums(l3_hdr, &info, - testpmd_ol_flags); + tx_offloads); /* Then process outer headers if any. Note that the software * checksum will be wrong if one of the inner checksums is * processed in hardware. 
*/ if (info.is_tunnel == 1) { tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info, - testpmd_ol_flags, + tx_offloads, !!(tx_ol_flags & PKT_TX_TCP_SEG)); } @@ -769,8 +740,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) if (info.is_tunnel == 1) { if (info.tunnel_tso_segsz || - (testpmd_ol_flags & - TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) || + (tx_offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) || (tx_ol_flags & PKT_TX_OUTER_IPV6)) { m->outer_l2_len = info.outer_l2_len; m->outer_l3_len = info.outer_l3_len; @@ -832,17 +803,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) rte_be_to_cpu_16(info.outer_ethertype), info.outer_l3_len); /* dump tx packet info */ - if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM | - TESTPMD_TX_OFFLOAD_UDP_CKSUM | - TESTPMD_TX_OFFLOAD_TCP_CKSUM | - TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) || + if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM)) || info.tso_segsz != 0) printf("tx: m->l2_len=%d m->l3_len=%d " "m->l4_len=%d\n", m->l2_len, m->l3_len, m->l4_len); if (info.is_tunnel == 1) { - if ((testpmd_ol_flags & - TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) || + if ((tx_offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) || (tx_ol_flags & PKT_TX_OUTER_IPV6)) printf("tx: m->outer_l2_len=%d " "m->outer_l3_len=%d\n", @@ -895,8 +866,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) if (ret >= 0) nb_segments += ret; else { - RTE_LOG(DEBUG, USER1, - "Unable to segment packet"); + TESTPMD_LOG(DEBUG, "Unable to segment packet"); rte_pktmbuf_free(pkts_burst[i]); } } diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c index acf9af94..0531b5d2 100644 --- a/app/test-pmd/flowgen.c +++ b/app/test-pmd/flowgen.c @@ -123,12 +123,13 @@ pkt_burst_flow_gen(struct fwd_stream *fs) struct ipv4_hdr *ip_hdr; struct udp_hdr *udp_hdr; uint16_t vlan_tci, vlan_tci_outer; - uint16_t ol_flags; + uint64_t ol_flags; uint16_t nb_rx; uint16_t nb_tx; uint16_t nb_pkt; uint16_t i; uint32_t 
retry; + uint64_t tx_offloads; #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES uint64_t start_tsc; uint64_t end_tsc; @@ -151,7 +152,14 @@ pkt_burst_flow_gen(struct fwd_stream *fs) mbp = current_fwd_lcore()->mbp; vlan_tci = ports[fs->tx_port].tx_vlan_id; vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer; - ol_flags = ports[fs->tx_port].tx_ol_flags; + + tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads; + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) + ol_flags = PKT_TX_VLAN_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) + ol_flags |= PKT_TX_QINQ_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) + ol_flags |= PKT_TX_MACSEC; for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { pkt = rte_mbuf_raw_alloc(mbp); diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c index d4b4c9eb..55d266d7 100644 --- a/app/test-pmd/icmpecho.c +++ b/app/test-pmd/icmpecho.c @@ -1,35 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2013 6WIND - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of 6WIND S.A. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 6WIND S.A. */ #include <stdarg.h> diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c index 91ee7864..6ae802c8 100644 --- a/app/test-pmd/ieee1588fwd.c +++ b/app/test-pmd/ieee1588fwd.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation */ diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c index ff6de45c..9dce76ef 100644 --- a/app/test-pmd/iofwd.c +++ b/app/test-pmd/iofwd.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include <stdarg.h> diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c index f4a4bf29..2adce701 100644 --- a/app/test-pmd/macfwd.c +++ b/app/test-pmd/macfwd.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include <stdarg.h> @@ -84,6 +55,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs) uint16_t nb_tx; uint16_t i; uint64_t ol_flags = 0; + uint64_t tx_offloads; #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES uint64_t start_tsc; uint64_t end_tsc; @@ -107,11 +79,12 @@ pkt_burst_mac_forward(struct fwd_stream *fs) #endif fs->rx_packets += nb_rx; txp = &ports[fs->tx_port]; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN) + tx_offloads = txp->dev_conf.txmode.offloads; + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) ol_flags = PKT_TX_VLAN_PKT; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ) + if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) ol_flags |= PKT_TX_QINQ_PKT; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC) + if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) ol_flags |= PKT_TX_MACSEC; for (i = 0; i < nb_rx; i++) { if (likely(i < nb_rx - 1)) diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c index 721865c9..e2cc4812 100644 --- a/app/test-pmd/macswap.c +++ b/app/test-pmd/macswap.c @@ -84,6 +84,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs) uint16_t i; uint32_t retry; uint64_t ol_flags = 0; + uint64_t tx_offloads; #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES uint64_t start_tsc; uint64_t end_tsc; @@ -107,11 +108,12 @@ pkt_burst_mac_swap(struct fwd_stream *fs) #endif fs->rx_packets += nb_rx; txp = &ports[fs->tx_port]; - if 
(txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN) + tx_offloads = txp->dev_conf.txmode.offloads; + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) ol_flags = PKT_TX_VLAN_PKT; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ) + if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) ol_flags |= PKT_TX_QINQ_PKT; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC) + if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) ol_flags |= PKT_TX_MACSEC; for (i = 0; i < nb_rx; i++) { if (likely(i < nb_rx - 1)) diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build new file mode 100644 index 00000000..7ed74db2 --- /dev/null +++ b/app/test-pmd/meson.build @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +sources = files('cmdline.c', + 'cmdline_flow.c', + 'cmdline_mtr.c', + 'cmdline_tm.c', + 'config.c', + 'csumonly.c', + 'flowgen.c', + 'icmpecho.c', + 'ieee1588fwd.c', + 'iofwd.c', + 'macfwd.c', + 'macswap.c', + 'parameters.c', + 'rxonly.c', + 'testpmd.c', + 'txonly.c') + +deps = ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci'] +if dpdk_conf.has('RTE_LIBRTE_PDUMP') + deps += 'pdump' +endif +if dpdk_conf.has('RTE_LIBRTE_I40E_PMD') + deps += 'pmd_i40e' +endif +if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD') + deps += 'pmd_ixgbe' +endif +if dpdk_conf.has('RTE_LIBRTE_SOFTNIC_PMD') + sources += files('tm.c') + deps += 'pmd_softnic' +endif + +dep_objs = [] +foreach d:deps + dep_objs += get_variable(get_option('default_library') + '_rte_' + d) +endforeach +dep_objs += cc.find_library('execinfo', required: false) # for BSD only + +link_libs = [] +if get_option('default_library') == 'static' + link_libs = dpdk_drivers +endif + +executable('dpdk-testpmd', + sources, + c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'], + link_whole: link_libs, + dependencies: dep_objs, + install_rpath: join_paths(get_option('prefix'), driver_install_path), + install: true) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 
84e7a63e..97d22b86 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include <errno.h> @@ -99,7 +70,7 @@ usage(char* progname) "--rss-ip | --rss-udp | " "--rxpt= | --rxht= | --rxwt= | --rxfreet= | " "--txpt= | --txht= | --txwt= | --txfreet= | " - "--txrst= | --txqflags= ]\n", + "--txrst= | --tx-offloads ]\n", progname); #ifdef RTE_LIBRTE_CMDLINE printf(" --interactive: run in interactive mode.\n"); @@ -162,10 +133,10 @@ usage(char* progname) printf(" --enable-lro: enable large receive offload.\n"); printf(" --enable-rx-cksum: enable rx hardware checksum offload.\n"); printf(" --enable-rx-timestamp: enable rx hardware timestamp offload.\n"); - printf(" --disable-hw-vlan: disable hardware vlan.\n"); - printf(" --disable-hw-vlan-filter: disable hardware vlan filter.\n"); - printf(" --disable-hw-vlan-strip: disable hardware vlan strip.\n"); - printf(" --disable-hw-vlan-extend: disable hardware vlan extend.\n"); + printf(" --enable-hw-vlan: enable hardware vlan.\n"); + printf(" --enable-hw-vlan-filter: enable hardware vlan filter.\n"); + printf(" --enable-hw-vlan-strip: enable hardware vlan strip.\n"); + printf(" --enable-hw-vlan-extend: enable hardware vlan extend.\n"); printf(" --enable-drop-en: enable per queue packet drop.\n"); printf(" --disable-rss: disable rss.\n"); printf(" --port-topology=N: set port topology (N: paired (default) or " @@ -192,8 +163,6 @@ usage(char* progname) "(0 <= N <= value of txd).\n"); printf(" --txrst=N: set the transmit RS bit threshold of TX rings to N " "(0 <= N <= value of txd).\n"); - printf(" --txqflags=0xXXXXXXXX: hexadecimal bitmask of TX queue flags " - "(0 <= N <= 0x7FFFFFFF).\n"); printf(" --tx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping]: " "tx queues statistics counters mapping " "(0 <= mapping <= %d).\n", RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); @@ -216,6 +185,7 @@ usage(char* progname) "disable print of designated event or all of them.\n"); printf(" --flow-isolate-all: " "requests flow API 
isolated mode on all ports at initialization time.\n"); + printf(" --tx-offloads=0xXXXXXXXX: hexadecimal bitmask of TX queue offloads\n"); } #ifdef RTE_LIBRTE_CMDLINE @@ -546,6 +516,10 @@ parse_event_printing_config(const char *optarg, int enable) mask = UINT32_C(1) << RTE_ETH_EVENT_MACSEC; else if (!strcmp(optarg, "intr_rmv")) mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV; + else if (!strcmp(optarg, "dev_probed")) + mask = UINT32_C(1) << RTE_ETH_EVENT_NEW; + else if (!strcmp(optarg, "dev_released")) + mask = UINT32_C(1) << RTE_ETH_EVENT_DESTROY; else if (!strcmp(optarg, "all")) mask = ~UINT32_C(0); else { @@ -565,7 +539,11 @@ launch_args_parse(int argc, char** argv) int n, opt; char **argvopt; int opt_idx; + portid_t pid; enum { TX, RX }; + /* Default offloads for all ports. */ + uint64_t rx_offloads = rx_mode.offloads; + uint64_t tx_offloads = tx_mode.offloads; static struct option lgopts[] = { { "help", 0, 0, 0 }, @@ -607,10 +585,10 @@ launch_args_parse(int argc, char** argv) { "enable-rx-cksum", 0, 0, 0 }, { "enable-rx-timestamp", 0, 0, 0 }, { "enable-scatter", 0, 0, 0 }, - { "disable-hw-vlan", 0, 0, 0 }, - { "disable-hw-vlan-filter", 0, 0, 0 }, - { "disable-hw-vlan-strip", 0, 0, 0 }, - { "disable-hw-vlan-extend", 0, 0, 0 }, + { "enable-hw-vlan", 0, 0, 0 }, + { "enable-hw-vlan-filter", 0, 0, 0 }, + { "enable-hw-vlan-strip", 0, 0, 0 }, + { "enable-hw-vlan-extend", 0, 0, 0 }, { "enable-drop-en", 0, 0, 0 }, { "disable-rss", 0, 0, 0 }, { "port-topology", 1, 0, 0 }, @@ -628,7 +606,6 @@ launch_args_parse(int argc, char** argv) { "txwt", 1, 0, 0 }, { "txfreet", 1, 0, 0 }, { "txrst", 1, 0, 0 }, - { "txqflags", 1, 0, 0 }, { "rxpt", 1, 0, 0 }, { "rxht", 1, 0, 0 }, { "rxwt", 1, 0, 0 }, @@ -643,6 +620,7 @@ launch_args_parse(int argc, char** argv) { "no-rmv-interrupt", 0, 0, 0 }, { "print-event", 1, 0, 0 }, { "mask-event", 1, 0, 0 }, + { "tx-offloads", 1, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -804,7 +782,8 @@ launch_args_parse(int argc, char** argv) if (n >= ETHER_MIN_LEN) { 
rx_mode.max_rx_pkt_len = (uint32_t) n; if (n > ETHER_MAX_LEN) - rx_mode.jumbo_frame = 1; + rx_offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; } else rte_exit(EXIT_FAILURE, "Invalid max-pkt-len=%d - should be > %d\n", @@ -897,34 +876,30 @@ launch_args_parse(int argc, char** argv) } #endif if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip")) - rx_mode.hw_strip_crc = 0; + rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; if (!strcmp(lgopts[opt_idx].name, "enable-lro")) - rx_mode.enable_lro = 1; + rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO; if (!strcmp(lgopts[opt_idx].name, "enable-scatter")) - rx_mode.enable_scatter = 1; + rx_offloads |= DEV_RX_OFFLOAD_SCATTER; if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum")) - rx_mode.hw_ip_checksum = 1; + rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM; if (!strcmp(lgopts[opt_idx].name, "enable-rx-timestamp")) - rx_mode.hw_timestamp = 1; - - if (!strcmp(lgopts[opt_idx].name, "disable-hw-vlan")) { - rx_mode.hw_vlan_filter = 0; - rx_mode.hw_vlan_strip = 0; - rx_mode.hw_vlan_extend = 0; - } + rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP; + if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN; if (!strcmp(lgopts[opt_idx].name, - "disable-hw-vlan-filter")) - rx_mode.hw_vlan_filter = 0; + "enable-hw-vlan-filter")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; if (!strcmp(lgopts[opt_idx].name, - "disable-hw-vlan-strip")) - rx_mode.hw_vlan_strip = 0; + "enable-hw-vlan-strip")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; if (!strcmp(lgopts[opt_idx].name, - "disable-hw-vlan-extend")) - rx_mode.hw_vlan_extend = 0; + "enable-hw-vlan-extend")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; if (!strcmp(lgopts[opt_idx].name, "enable-drop-en")) rx_drop_en = 1; @@ -951,21 +926,21 @@ launch_args_parse(int argc, char** argv) rss_hf = ETH_RSS_UDP; if (!strcmp(lgopts[opt_idx].name, "rxq")) { n = atoi(optarg); - if (n >= 0 && n <= (int) MAX_QUEUE_ID) + if (n >= 0 && check_nb_rxq((queueid_t)n) == 0) nb_rxq = (queueid_t) n; else 
rte_exit(EXIT_FAILURE, "rxq %d invalid - must be" - " >= 0 && <= %d\n", n, - (int) MAX_QUEUE_ID); + " >= 0 && <= %u\n", n, + get_allowed_max_nb_rxq(&pid)); } if (!strcmp(lgopts[opt_idx].name, "txq")) { n = atoi(optarg); - if (n >= 0 && n <= (int) MAX_QUEUE_ID) + if (n >= 0 && check_nb_txq((queueid_t)n) == 0) nb_txq = (queueid_t) n; else rte_exit(EXIT_FAILURE, "txq %d invalid - must be" - " >= 0 && <= %d\n", n, - (int) MAX_QUEUE_ID); + " >= 0 && <= %u\n", n, + get_allowed_max_nb_txq(&pid)); } if (!nb_rxq && !nb_txq) { rte_exit(EXIT_FAILURE, "Either rx or tx queues should " @@ -1004,15 +979,6 @@ launch_args_parse(int argc, char** argv) else rte_exit(EXIT_FAILURE, "txrst must be >= 0\n"); } - if (!strcmp(lgopts[opt_idx].name, "txqflags")) { - char *end = NULL; - n = strtoul(optarg, &end, 16); - if (n >= 0) - txq_flags = (int32_t)n; - else - rte_exit(EXIT_FAILURE, - "txqflags must be >= 0\n"); - } if (!strcmp(lgopts[opt_idx].name, "rxd")) { n = atoi(optarg); if (n > 0) { @@ -1117,6 +1083,15 @@ launch_args_parse(int argc, char** argv) rmv_interrupt = 0; if (!strcmp(lgopts[opt_idx].name, "flow-isolate-all")) flow_isolate_all = 1; + if (!strcmp(lgopts[opt_idx].name, "tx-offloads")) { + char *end = NULL; + n = strtoull(optarg, &end, 16); + if (n >= 0) + tx_offloads = (uint64_t)n; + else + rte_exit(EXIT_FAILURE, + "tx-offloads must be >= 0\n"); + } if (!strcmp(lgopts[opt_idx].name, "print-event")) if (parse_event_printing_config(optarg, 1)) { rte_exit(EXIT_FAILURE, @@ -1140,4 +1115,8 @@ launch_args_parse(int argc, char** argv) break; } } + + /* Set offload configuration from command line parameters. */ + rx_mode.offloads = rx_offloads; + tx_mode.offloads = tx_offloads; } diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c index fb6e8e33..a93d8061 100644 --- a/app/test-pmd/rxonly.c +++ b/app/test-pmd/rxonly.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include <stdarg.h> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index c3ab4484..4c0e2586 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include <stdarg.h> @@ -67,6 +38,7 @@ #include <rte_mempool.h> #include <rte_malloc.h> #include <rte_mbuf.h> +#include <rte_mbuf_pool_ops.h> #include <rte_interrupts.h> #include <rte_pci.h> #include <rte_ether.h> @@ -91,6 +63,7 @@ #include "testpmd.h" uint16_t verbose_level = 0; /**< Silent by default. 
*/ +int testpmd_logtype; /**< Log type for testpmd logs */ /* use master core for command line ? */ uint8_t interactive = 0; @@ -119,6 +92,24 @@ uint8_t socket_num = UMA_NO_CONFIG; uint8_t mp_anon = 0; /* + * Store specified sockets on which memory pool to be used by ports + * is allocated. + */ +uint8_t port_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which RX ring to be used by ports + * is allocated. + */ +uint8_t rxring_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which TX ring to be used by ports + * is allocated. + */ +uint8_t txring_numa[RTE_MAX_ETHPORTS]; + +/* * Record the Ethernet address of peer target ports to which packets are * forwarded. * Must be instantiated with the ethernet addresses of peer traffic generator @@ -220,8 +211,8 @@ queueid_t nb_txq = 1; /**< Number of TX queues per port. */ /* * Configurable number of RX/TX ring descriptors. */ -#define RTE_TEST_RX_DESC_DEFAULT 128 -#define RTE_TEST_TX_DESC_DEFAULT 512 +#define RTE_TEST_RX_DESC_DEFAULT 1024 +#define RTE_TEST_TX_DESC_DEFAULT 1024 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ @@ -259,11 +250,6 @@ int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; /* - * Configurable value of TX queue flags. - */ -int32_t txq_flags = RTE_PMD_PARAM_UNSET; - -/* * Receive Side Scaling (RSS) configuration. */ uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ @@ -338,15 +324,12 @@ lcoreid_t latencystats_lcore_id = -1; */ struct rte_eth_rxmode rx_mode = { .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ - .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled. */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ - .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ - .hw_vlan_strip = 1, /**< VLAN strip enabled. */ - .hw_vlan_extend = 0, /**< Extended VLAN disabled. 
*/ - .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ - .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */ - .hw_timestamp = 0, /**< HW timestamp enabled. */ + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, + .ignore_offload_bitfield = 1, +}; + +struct rte_eth_txmode tx_mode = { + .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, }; struct rte_fdir_conf fdir_conf = { @@ -512,7 +495,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); - RTE_LOG(INFO, USER1, + TESTPMD_LOG(INFO, "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", pool_name, nb_mbuf, mbuf_seg_size, socket_id); @@ -533,6 +516,8 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); } else { /* wrapper to rte_mempool_create() */ + TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", + rte_mbuf_best_mempool_ops()); rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, mb_mempool_cache, 0, mbuf_seg_size, socket_id); } @@ -568,6 +553,98 @@ check_socket_id(const unsigned int socket_id) return 0; } +/* + * Get the allowed maximum number of RX queues. + * *pid return the port id which has minimal value of + * max_rx_queues in all ports. + */ +queueid_t +get_allowed_max_nb_rxq(portid_t *pid) +{ + queueid_t allowed_max_rxq = MAX_QUEUE_ID; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + rte_eth_dev_info_get(pi, &dev_info); + if (dev_info.max_rx_queues < allowed_max_rxq) { + allowed_max_rxq = dev_info.max_rx_queues; + *pid = pi; + } + } + return allowed_max_rxq; +} + +/* + * Check input rxq is valid or not. + * If input rxq is not greater than any of maximum number + * of RX queues of all ports, it is valid. 
+ * if valid, return 0, else return -1 + */ +int +check_nb_rxq(queueid_t rxq) +{ + queueid_t allowed_max_rxq; + portid_t pid = 0; + + allowed_max_rxq = get_allowed_max_nb_rxq(&pid); + if (rxq > allowed_max_rxq) { + printf("Fail: input rxq (%u) can't be greater " + "than max_rx_queues (%u) of port %u\n", + rxq, + allowed_max_rxq, + pid); + return -1; + } + return 0; +} + +/* + * Get the allowed maximum number of TX queues. + * *pid return the port id which has minimal value of + * max_tx_queues in all ports. + */ +queueid_t +get_allowed_max_nb_txq(portid_t *pid) +{ + queueid_t allowed_max_txq = MAX_QUEUE_ID; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + rte_eth_dev_info_get(pi, &dev_info); + if (dev_info.max_tx_queues < allowed_max_txq) { + allowed_max_txq = dev_info.max_tx_queues; + *pid = pi; + } + } + return allowed_max_txq; +} + +/* + * Check input txq is valid or not. + * If input txq is not greater than any of maximum number + * of TX queues of all ports, it is valid. 
+ * if valid, return 0, else return -1 + */ +int +check_nb_txq(queueid_t txq) +{ + queueid_t allowed_max_txq; + portid_t pid = 0; + + allowed_max_txq = get_allowed_max_nb_txq(&pid); + if (txq > allowed_max_txq) { + printf("Fail: input txq (%u) can't be greater " + "than max_tx_queues (%u) of port %u\n", + txq, + allowed_max_txq, + pid); + return -1; + } + return 0; +} + static void init_config(void) { @@ -609,8 +686,14 @@ init_config(void) RTE_ETH_FOREACH_DEV(pid) { port = &ports[pid]; + /* Apply default TxRx configuration for all ports */ + port->dev_conf.txmode = tx_mode; + port->dev_conf.rxmode = rx_mode; rte_eth_dev_info_get(pid, &port->dev_info); - + if (!(port->dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + port->dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; if (numa_support) { if (port_numa[pid] != NUMA_NO_CONFIG) port_per_socket[port_numa[pid]]++; @@ -1399,15 +1482,23 @@ all_ports_started(void) } int +port_is_stopped(portid_t port_id) +{ + struct rte_port *port = &ports[port_id]; + + if ((port->port_status != RTE_PORT_STOPPED) && + (port->slave_flag == 0)) + return 0; + return 1; +} + +int all_ports_stopped(void) { portid_t pi; - struct rte_port *port; RTE_ETH_FOREACH_DEV(pi) { - port = &ports[pi]; - if ((port->port_status != RTE_PORT_STOPPED) && - (port->slave_flag == 0)) + if (!port_is_stopped(pi)) return 0; } @@ -1495,6 +1586,9 @@ start_port(portid_t pid) } if (port->need_reconfig_queues > 0) { port->need_reconfig_queues = 0; + port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + /* Apply Tx offloads configuration */ + port->tx_conf.offloads = port->dev_conf.txmode.offloads; /* setup tx queues */ for (qi = 0; qi < nb_txq; qi++) { if ((numa_support) && @@ -1521,6 +1615,8 @@ start_port(portid_t pid) port->need_reconfig_queues = 1; return -1; } + /* Apply Rx offloads configuration */ + port->rx_conf.offloads = port->dev_conf.rxmode.offloads; /* setup rx queues */ for (qi = 0; qi < nb_rxq; qi++) { if ((numa_support) && @@ 
-1568,20 +1664,6 @@ start_port(portid_t pid) } } - for (event_type = RTE_ETH_EVENT_UNKNOWN; - event_type < RTE_ETH_EVENT_MAX; - event_type++) { - diag = rte_eth_dev_callback_register(pi, - event_type, - eth_event_callback, - NULL); - if (diag) { - printf("Failed to setup even callback for event %d\n", - event_type); - return -1; - } - } - /* start port */ if (rte_eth_dev_start(pi) < 0) { printf("Fail to start port %d\n", pi); @@ -1608,6 +1690,20 @@ start_port(portid_t pid) need_check_link_status = 1; } + for (event_type = RTE_ETH_EVENT_UNKNOWN; + event_type < RTE_ETH_EVENT_MAX; + event_type++) { + diag = rte_eth_dev_callback_register(RTE_ETH_ALL, + event_type, + eth_event_callback, + NULL); + if (diag) { + printf("Failed to setup even callback for event %d\n", + event_type); + return -1; + } + } + if (need_check_link_status == 1 && !no_link_check) check_all_ports_link_status(RTE_PORT_ALL); else if (need_check_link_status == 0) @@ -1804,7 +1900,7 @@ detach_port(portid_t port_id) port_flow_flush(port_id); if (rte_eth_dev_detach(port_id, name)) { - RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name); + TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name); return; } @@ -1913,7 +2009,7 @@ rmv_event_callback(void *arg) close_port(port_id); printf("removing device %s\n", dev->device->name); if (rte_eal_dev_detach(dev->device)) - RTE_LOG(ERR, USER1, "Failed to detach device %s\n", + TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->device->name); } @@ -1930,6 +2026,8 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", [RTE_ETH_EVENT_MACSEC] = "MACsec", [RTE_ETH_EVENT_INTR_RMV] = "device removal", + [RTE_ETH_EVENT_NEW] = "device probed", + [RTE_ETH_EVENT_DESTROY] = "device released", [RTE_ETH_EVENT_MAX] = NULL, }; @@ -1946,6 +2044,9 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, fflush(stdout); } + if (port_id_is_invalid(port_id, DISABLED_WARN)) + return 0; + 
switch (type) { case RTE_ETH_EVENT_INTR_RMV: if (rte_eal_alarm_set(100000, @@ -2072,9 +2173,6 @@ rxtx_port_config(struct rte_port *port) if (tx_free_thresh != RTE_PMD_PARAM_UNSET) port->tx_conf.tx_free_thresh = tx_free_thresh; - - if (txq_flags != RTE_PMD_PARAM_UNSET) - port->tx_conf.txq_flags = txq_flags; } void @@ -2085,7 +2183,6 @@ init_port_config(void) RTE_ETH_FOREACH_DEV(pid) { port = &ports[pid]; - port->dev_conf.rxmode = rx_mode; port->dev_conf.fdir_conf = fdir_conf; if (nb_rxq > 1) { port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; @@ -2248,11 +2345,14 @@ init_port_dcb_config(portid_t pid, /* Enter DCB configuration status */ dcb_config = 1; + port_conf.rxmode = rte_port->dev_conf.rxmode; + port_conf.txmode = rte_port->dev_conf.txmode; + /*set configuration of DCB in vt mode and DCB in non-vt mode*/ retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en); if (retval < 0) return retval; - port_conf.rxmode.hw_vlan_filter = 1; + port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; /** * Write the configuration into the device. 
@@ -2301,7 +2401,7 @@ init_port_dcb_config(portid_t pid, rxtx_port_config(rte_port); /* VLAN filter */ - rte_port->dev_conf.rxmode.hw_vlan_filter = 1; + rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; for (i = 0; i < RTE_DIM(vlan_tags); i++) rx_vft_set(pid, vlan_tags[i], 1); @@ -2384,8 +2484,13 @@ main(int argc, char** argv) if (diag < 0) rte_panic("Cannot init EAL\n"); + testpmd_logtype = rte_log_register("testpmd"); + if (testpmd_logtype < 0) + rte_panic("Cannot register log type"); + rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); + if (mlockall(MCL_CURRENT | MCL_FUTURE)) { - RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n", + TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", strerror(errno)); } @@ -2396,7 +2501,7 @@ main(int argc, char** argv) nb_ports = (portid_t) rte_eth_dev_count(); if (nb_ports == 0) - RTE_LOG(WARNING, EAL, "No probed ethernet devices\n"); + TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); /* allocate port structures, and init them */ init_port(); diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index 1639d27e..153abea0 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #ifndef _TESTPMD_H_ @@ -138,26 +109,6 @@ struct fwd_stream { #endif }; -/** Offload IP checksum in csum forward engine */ -#define TESTPMD_TX_OFFLOAD_IP_CKSUM 0x0001 -/** Offload UDP checksum in csum forward engine */ -#define TESTPMD_TX_OFFLOAD_UDP_CKSUM 0x0002 -/** Offload TCP checksum in csum forward engine */ -#define TESTPMD_TX_OFFLOAD_TCP_CKSUM 0x0004 -/** Offload SCTP checksum in csum forward engine */ -#define TESTPMD_TX_OFFLOAD_SCTP_CKSUM 0x0008 -/** Offload outer IP checksum in csum forward engine for recognized tunnels */ -#define TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM 0x0010 -/** Parse tunnel in csum forward engine. If set, dissect tunnel headers - * of rx packets. If not set, treat inner headers as payload. 
*/ -#define TESTPMD_TX_OFFLOAD_PARSE_TUNNEL 0x0020 -/** Insert VLAN header in forward engine */ -#define TESTPMD_TX_OFFLOAD_INSERT_VLAN 0x0040 -/** Insert double VLAN header in forward engine */ -#define TESTPMD_TX_OFFLOAD_INSERT_QINQ 0x0080 -/** Offload MACsec in forward engine */ -#define TESTPMD_TX_OFFLOAD_MACSEC 0x0100 - /** Descriptor for a single flow. */ struct port_flow { size_t size; /**< Allocated space including data[]. */ @@ -215,7 +166,7 @@ struct rte_port { struct fwd_stream *rx_stream; /**< Port RX stream, if unique */ struct fwd_stream *tx_stream; /**< Port TX stream, if unique */ unsigned int socket_id; /**< For NUMA support */ - uint16_t tx_ol_flags;/**< TX Offload Flags (TESTPMD_TX_OFFLOAD...). */ + uint16_t parse_tunnel:1; /**< Parse internal headers */ uint16_t tso_segsz; /**< Segmentation offload MSS for non-tunneled packets. */ uint16_t tunnel_tso_segsz; /**< Segmentation offload MSS for tunneled pkts. */ uint16_t tx_vlan_id;/**< The tag ID */ @@ -353,6 +304,7 @@ extern uint8_t xstats_hide_zero; /**< Hide zero values for xstats display */ /* globals used for configuration */ extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */ +extern int testpmd_logtype; /**< Log type for testpmd logs */ extern uint8_t interactive; extern uint8_t auto_start; extern uint8_t tx_first; @@ -377,19 +329,19 @@ extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */ * Store specified sockets on which memory pool to be used by ports * is allocated. */ -uint8_t port_numa[RTE_MAX_ETHPORTS]; +extern uint8_t port_numa[RTE_MAX_ETHPORTS]; /* * Store specified sockets on which RX ring to be used by ports * is allocated. */ -uint8_t rxring_numa[RTE_MAX_ETHPORTS]; +extern uint8_t rxring_numa[RTE_MAX_ETHPORTS]; /* * Store specified sockets on which TX ring to be used by ports * is allocated. 
*/ -uint8_t txring_numa[RTE_MAX_ETHPORTS]; +extern uint8_t txring_numa[RTE_MAX_ETHPORTS]; extern uint8_t socket_num; @@ -415,6 +367,8 @@ extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; extern struct rte_port *ports; extern struct rte_eth_rxmode rx_mode; +extern struct rte_eth_txmode tx_mode; + extern uint64_t rss_hf; extern queueid_t nb_rxq; @@ -427,11 +381,9 @@ extern int16_t rx_free_thresh; extern int8_t rx_drop_en; extern int16_t tx_free_thresh; extern int16_t tx_rs_thresh; -extern int32_t txq_flags; extern uint8_t dcb_config; extern uint8_t dcb_test; -extern enum dcb_queue_mapping_mode dcb_q_mapping; extern uint16_t mbuf_data_size; /**< Mbuf data space size. */ extern uint32_t param_total_num_mbufs; @@ -600,6 +552,8 @@ void set_def_fwd_config(void); void reconfig(portid_t new_port_id, unsigned socket_id); int init_fwd_streams(void); +void set_fwd_eth_peer(portid_t port_id, char *peer_addr); + void port_mtu_set(portid_t port_id, uint16_t mtu); void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos); void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, @@ -682,6 +636,7 @@ void reset_port(portid_t pid); void attach_port(char *identifier); void detach_port(portid_t port_id); int all_ports_stopped(void); +int port_is_stopped(portid_t port_id); int port_is_started(portid_t port_id); void pmd_test_exit(void); void fdir_get_infos(portid_t port_id); @@ -715,9 +670,9 @@ void mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr); void mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr); void port_dcb_info_display(portid_t port_id); -uint8_t *open_ddp_package_file(const char *file_path, uint32_t *size); -int save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size); -int close_ddp_package_file(uint8_t *buf); +uint8_t *open_file(const char *file_path, uint32_t *size); +int save_file(const char *file_path, uint8_t *buf, uint32_t size); +int close_file(uint8_t *buf); void 
port_queue_region_info_display(portid_t port_id, void *buf); @@ -728,6 +683,11 @@ enum print_warning { int port_id_is_invalid(portid_t port_id, enum print_warning warning); int new_socket_id(unsigned int socket_id); +queueid_t get_allowed_max_nb_rxq(portid_t *pid); +int check_nb_rxq(queueid_t rxq); +queueid_t get_allowed_max_nb_txq(portid_t *pid); +int check_nb_txq(queueid_t txq); + /* * Work-around of a compilation error with ICC on invocations of the * rte_be_to_cpu_16() function. @@ -747,4 +707,7 @@ int new_socket_id(unsigned int socket_id); #endif #endif /* __GCC__ */ +#define TESTPMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, testpmd_logtype, "testpmd: " fmt, ## args) + #endif /* _TESTPMD_H_ */ diff --git a/app/test-pmd/tm.c b/app/test-pmd/tm.c index dd837cb8..7231552a 100644 --- a/app/test-pmd/tm.c +++ b/app/test-pmd/tm.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #include <stdio.h> #include <sys/stat.h> @@ -604,6 +575,10 @@ softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h, tc_parent_node_id = h->pipe_node_id[i][j]; tnp.shared_shaper_id = (uint32_t *)calloc(1, sizeof(uint32_t)); + if (tnp.shared_shaper_id == NULL) { + printf("Shared shaper mem alloc err\n"); + return -1; + } tnp.shared_shaper_id[0] = k; pos = j + (i * PIPE_NODES_PER_SUBPORT); h->tc_node_id[pos][k] = diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index 309c7389..1f08b6ed 100644 --- a/app/test-pmd/txonly.c +++ b/app/test-pmd/txonly.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include <stdarg.h> @@ -104,6 +75,7 @@ copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt, buf = ((char*) buf + copy_len); seg = seg->next; seg_buf = rte_pktmbuf_mtod(seg, char *); + copy_len = seg->data_len; } rte_memcpy(seg_buf, buf, (size_t) len); } @@ -193,6 +165,7 @@ pkt_burst_transmit(struct fwd_stream *fs) uint32_t retry; uint64_t ol_flags = 0; uint8_t i; + uint64_t tx_offloads; #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES uint64_t start_tsc; uint64_t end_tsc; @@ -206,13 +179,14 @@ pkt_burst_transmit(struct fwd_stream *fs) mbp = current_fwd_lcore()->mbp; txp = &ports[fs->tx_port]; + tx_offloads = txp->dev_conf.txmode.offloads; vlan_tci = txp->tx_vlan_id; vlan_tci_outer = txp->tx_vlan_id_outer; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN) + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) ol_flags = PKT_TX_VLAN_PKT; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ) + if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) ol_flags |= PKT_TX_QINQ_PKT; - if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC) + if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) ol_flags |= PKT_TX_MACSEC; for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { pkt = rte_mbuf_raw_alloc(mbp); |