author    Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:15:11 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:45:54 +0000
commit    055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree      8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /app/test-crypto-perf
parent    f239aed5e674965691846e8ce3f187dd47523689 (diff)

New upstream version 17.11-rc3

Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'app/test-crypto-perf')
-rw-r--r--  app/test-crypto-perf/Makefile                     |    6
-rw-r--r--  app/test-crypto-perf/cperf_ops.c                  |  158
-rw-r--r--  app/test-crypto-perf/cperf_ops.h                  |    2
-rw-r--r--  app/test-crypto-perf/cperf_options.h              |   15
-rw-r--r--  app/test-crypto-perf/cperf_options_parsing.c      |  170
-rw-r--r--  app/test-crypto-perf/cperf_test_common.c          |  244
-rw-r--r--  app/test-crypto-perf/cperf_test_common.h          |   52
-rw-r--r--  app/test-crypto-perf/cperf_test_latency.c         |  239
-rw-r--r--  app/test-crypto-perf/cperf_test_pmd_cyclecount.c  |  520
-rw-r--r--  app/test-crypto-perf/cperf_test_pmd_cyclecount.h  |   61
-rw-r--r--  app/test-crypto-perf/cperf_test_throughput.c      |  237
-rw-r--r--  app/test-crypto-perf/cperf_test_vector_parsing.c  |   59
-rw-r--r--  app/test-crypto-perf/cperf_test_vectors.c         |    6
-rw-r--r--  app/test-crypto-perf/cperf_test_vectors.h         |    4
-rw-r--r--  app/test-crypto-perf/cperf_test_verify.c          |  283
-rw-r--r--  app/test-crypto-perf/main.c                       |  110
16 files changed, 1436 insertions(+), 730 deletions(-)
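
The bulk of this import reworks test-crypto-perf so that each device/queue pair draws crypto ops and mbufs from one combined mempool, replacing the separate op and in/out mbuf pools used before. A rough sketch of the per-object layout established by cperf_alloc_common_memory() (added below in cperf_test_common.c); the annotations are descriptive, not literal field names:

    /*
     * One object of the combined mempool:
     *
     *   offset 0        rte_crypto_op + rte_crypto_sym_op + private data
     *                   (IVs, AAD, per-test data), padded to a cache line
     *   src_buf_offset  segments_nb x (rte_mbuf header + segment_sz bytes
     *                   of data), chained as one source mbuf
     *   dst_buf_offset  one single-segment destination mbuf; present only
     *                   when --out-of-place is given, otherwise offset is 0
     */

With buffers at fixed offsets inside each object, the test runners can recycle ops with plain rte_mempool_get_bulk()/rte_mempool_put_bulk() calls instead of allocating and freeing mbufs per burst.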
diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile
index e4a989fe..c75d7ed1 100644
--- a/app/test-crypto-perf/Makefile
+++ b/app/test-crypto-perf/Makefile
@@ -42,7 +42,13 @@ SRCS-y += cperf_options_parsing.c
SRCS-y += cperf_test_vectors.c
SRCS-y += cperf_test_throughput.c
SRCS-y += cperf_test_latency.c
+SRCS-y += cperf_test_pmd_cyclecount.c
SRCS-y += cperf_test_verify.c
SRCS-y += cperf_test_vector_parsing.c
+SRCS-y += cperf_test_common.c
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y)
+LDLIBS += -lrte_pmd_crypto_scheduler
+endif
include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 88fb9725..23d30ca3 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -37,7 +37,7 @@
static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
@@ -48,10 +48,18 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
sym_op->cipher.data.length = options->test_buffer_size;
@@ -63,7 +71,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
@@ -74,10 +82,18 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* auth parameters */
sym_op->auth.data.length = options->test_buffer_size;
@@ -89,7 +105,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@@ -100,10 +116,18 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@@ -132,7 +156,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@@ -143,10 +167,18 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
if (test_vector->auth_iv.length) {
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
@@ -167,21 +199,29 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
@@ -211,7 +251,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@@ -222,10 +262,18 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@@ -248,21 +296,29 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -300,29 +356,41 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
uint16_t iv_offset)
{
uint16_t i;
+ /* AAD is placed after the IV */
+ uint16_t aad_offset = iv_offset +
+ RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* AEAD parameters */
sym_op->aead.data.length = options->test_buffer_size;
- sym_op->aead.data.offset =
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16);
+ sym_op->aead.data.offset = 0;
- sym_op->aead.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
- sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
+ sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, aad_offset);
+ sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+ aad_offset);
if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
sym_op->aead.digest.data = test_vector->digest.data;
@@ -335,21 +403,29 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->aead.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
}
@@ -358,8 +434,26 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
uint8_t *, iv_offset);
- memcpy(iv_ptr, test_vector->aead_iv.data,
+ /*
+ * If doing AES-CCM, the nonce is copied one byte
+ * after the start of the IV field, and the AAD is copied
+ * 18 bytes after the start of the AAD field.
+ */
+ if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ memcpy(iv_ptr + 1, test_vector->aead_iv.data,
test_vector->aead_iv.length);
+
+ memcpy(ops[i]->sym->aead.aad.data + 18,
+ test_vector->aad.data,
+ test_vector->aad.length);
+ } else {
+ memcpy(iv_ptr, test_vector->aead_iv.data,
+ test_vector->aead_iv.length);
+
+ memcpy(ops[i]->sym->aead.aad.data,
+ test_vector->aad.data,
+ test_vector->aad.length);
+ }
}
}
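
As the hunks above show, each populate function now recovers its mbufs from the op pointer itself instead of from caller-supplied arrays. A minimal sketch of the addressing idiom, assuming op points at one object of the combined pool described earlier:

    /* sketch only: how the populate functions locate the mbufs */
    struct rte_mbuf *m_src = (struct rte_mbuf *)
            ((uint8_t *)op + src_buf_offset);
    struct rte_mbuf *m_dst = (dst_buf_offset == 0) ?
            NULL : /* in-place: no separate destination buffer */
            (struct rte_mbuf *)((uint8_t *)op + dst_buf_offset);

The AES-CCM offsets follow the DPDK symmetric-crypto convention (as documented in rte_crypto_sym.h, to the best of my reading): byte 0 of the CCM IV field is reserved for the flags byte, so the nonce starts at offset 1, and the first 18 bytes of the AAD buffer are reserved for the B0 block plus the two-byte AAD length encoding, so the actual AAD starts at offset 18.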
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 1f8fa937..94951cc3 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -47,7 +47,7 @@ typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
uint16_t iv_offset);
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 10cd2d8a..da4fb47c 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -11,7 +11,8 @@
#define CPERF_TOTAL_OPS ("total-ops")
#define CPERF_BURST_SIZE ("burst-sz")
#define CPERF_BUFFER_SIZE ("buffer-sz")
-#define CPERF_SEGMENTS_NB ("segments-nb")
+#define CPERF_SEGMENT_SIZE ("segment-sz")
+#define CPERF_DESC_NB ("desc-nb")
#define CPERF_DEVTYPE ("devtype")
#define CPERF_OPTYPE ("optype")
@@ -40,12 +41,16 @@
#define CPERF_CSV ("csv-friendly")
+/* benchmark-specific options */
+#define CPERF_PMDCC_DELAY_MS ("pmd-cyclecount-delay-ms")
+
#define MAX_LIST 32
enum cperf_perf_test_type {
CPERF_TEST_TYPE_THROUGHPUT,
CPERF_TEST_TYPE_LATENCY,
- CPERF_TEST_TYPE_VERIFY
+ CPERF_TEST_TYPE_VERIFY,
+ CPERF_TEST_TYPE_PMDCC
};
@@ -66,8 +71,10 @@ struct cperf_options {
uint32_t pool_sz;
uint32_t total_ops;
- uint32_t segments_nb;
+ uint32_t segment_sz;
uint32_t test_buffer_size;
+ uint32_t nb_descriptors;
+ uint16_t nb_qps;
uint32_t sessionless:1;
uint32_t out_of_place:1;
@@ -113,6 +120,8 @@ struct cperf_options {
uint32_t min_burst_size;
uint32_t inc_burst_size;
+ /* pmd-cyclecount specific options */
+ uint32_t pmdcc_delay;
};
void
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index 085aa8fe..ad43e84c 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -46,6 +46,47 @@ struct name_id_map {
uint32_t id;
};
+static void
+usage(char *progname)
+{
+ printf("%s [EAL options] --\n"
+ " --silent: disable options dump\n"
+ " --ptest throughput / latency / verify / pmd-cycleount :"
+ " set test type\n"
+ " --pool_sz N: set the number of crypto ops/mbufs allocated\n"
+ " --total-ops N: set the number of total operations performed\n"
+ " --burst-sz N: set the number of packets per burst\n"
+ " --buffer-sz N: set the size of a single packet\n"
+ " --segment-sz N: set the size of the segment to use\n"
+ " --desc-nb N: set number of descriptors for each crypto device\n"
+ " --devtype TYPE: set crypto device type to use\n"
+ " --optype cipher-only / auth-only / cipher-then-auth /\n"
+ " auth-then-cipher / aead : set operation type\n"
+ " --sessionless: enable session-less crypto operations\n"
+ " --out-of-place: enable out-of-place crypto operations\n"
+ " --test-file NAME: set the test vector file path\n"
+ " --test-name NAME: set specific test name section in test file\n"
+ " --cipher-algo ALGO: set cipher algorithm\n"
+ " --cipher-op encrypt / decrypt: set the cipher operation\n"
+ " --cipher-key-sz N: set the cipher key size\n"
+ " --cipher-iv-sz N: set the cipher IV size\n"
+ " --auth-algo ALGO: set auth algorithm\n"
+ " --auth-op generate / verify: set the auth operation\n"
+ " --auth-key-sz N: set the auth key size\n"
+ " --auth-iv-sz N: set the auth IV size\n"
+ " --aead-algo ALGO: set AEAD algorithm\n"
+ " --aead-op encrypt / decrypt: set the AEAD operation\n"
+ " --aead-key-sz N: set the AEAD key size\n"
+ " --aead-iv-sz N: set the AEAD IV size\n"
+ " --aead-aad-sz N: set the AEAD AAD size\n"
+ " --digest-sz N: set the digest size\n"
+ " --pmd-cyclecount-delay-ms N: set delay between enqueue\n"
+ " and dequeue in pmd-cyclecount benchmarking mode\n"
+ " --csv-friendly: enable test result output CSV friendly\n"
+ " -h: prints this help\n",
+ progname);
+}
+
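
As an illustration of the new options, a hypothetical invocation of the pmd-cyclecount mode (binary path, EAL flags and algorithm parameters are placeholders, not taken from this patch):

    ./dpdk-test-crypto-perf -l 0,1 -- --ptest pmd-cyclecount \
            --devtype crypto_aesni_mb --optype cipher-only \
            --cipher-algo aes-cbc --cipher-key-sz 16 --cipher-iv-sz 16 \
            --segment-sz 512 --desc-nb 1024 --pool-sz 4096 \
            --pmd-cyclecount-delay-ms 10

Note that pmd-cyclecount requires --pool-sz to be at least --desc-nb, a constraint enforced in cperf_options_check() further below.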
static int
get_str_key_id_mapping(struct name_id_map *map, unsigned int map_len,
const char *str_key)
@@ -76,6 +117,10 @@ parse_cperf_test_type(struct cperf_options *opts, const char *arg)
{
cperf_test_type_strs[CPERF_TEST_TYPE_LATENCY],
CPERF_TEST_TYPE_LATENCY
+ },
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_PMDCC],
+ CPERF_TEST_TYPE_PMDCC
}
};
@@ -137,6 +182,7 @@ parse_range(const char *arg, uint32_t *min, uint32_t *max, uint32_t *inc)
if (copy_arg == NULL)
return -1;
+ errno = 0;
token = strtok(copy_arg, ":");
/* Parse minimum value */
@@ -203,6 +249,7 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
if (copy_arg == NULL)
return -1;
+ errno = 0;
token = strtok(copy_arg, ",");
/* Parse first value */
@@ -322,17 +369,35 @@ parse_buffer_sz(struct cperf_options *opts, const char *arg)
}
static int
-parse_segments_nb(struct cperf_options *opts, const char *arg)
+parse_segment_sz(struct cperf_options *opts, const char *arg)
{
- int ret = parse_uint32_t(&opts->segments_nb, arg);
+ int ret = parse_uint32_t(&opts->segment_sz, arg);
if (ret) {
- RTE_LOG(ERR, USER1, "failed to parse segments number\n");
+ RTE_LOG(ERR, USER1, "failed to parse segment size\n");
return -1;
}
- if ((opts->segments_nb == 0) || (opts->segments_nb > 255)) {
- RTE_LOG(ERR, USER1, "invalid segments number specified\n");
+ if (opts->segment_sz == 0) {
+ RTE_LOG(ERR, USER1, "Segment size has to be bigger than 0\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+parse_desc_nb(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->nb_descriptors, arg);
+
+ if (ret) {
+ RTE_LOG(ERR, USER1, "failed to parse descriptors number\n");
+ return -1;
+ }
+
+ if (opts->nb_descriptors == 0) {
+ RTE_LOG(ERR, USER1, "invalid descriptors number specified\n");
return -1;
}
@@ -623,6 +688,20 @@ parse_csv_friendly(struct cperf_options *opts, const char *arg __rte_unused)
return 0;
}
+static int
+parse_pmd_cyclecount_delay_ms(struct cperf_options *opts,
+ const char *arg)
+{
+ int ret = parse_uint32_t(&opts->pmdcc_delay, arg);
+
+ if (ret) {
+ RTE_LOG(ERR, USER1, "failed to parse pmd-cyclecount delay\n");
+ return -1;
+ }
+
+ return 0;
+}
+
typedef int (*option_parser_t)(struct cperf_options *opts,
const char *arg);
@@ -640,7 +719,8 @@ static struct option lgopts[] = {
{ CPERF_TOTAL_OPS, required_argument, 0, 0 },
{ CPERF_BURST_SIZE, required_argument, 0, 0 },
{ CPERF_BUFFER_SIZE, required_argument, 0, 0 },
- { CPERF_SEGMENTS_NB, required_argument, 0, 0 },
+ { CPERF_SEGMENT_SIZE, required_argument, 0, 0 },
+ { CPERF_DESC_NB, required_argument, 0, 0 },
{ CPERF_DEVTYPE, required_argument, 0, 0 },
{ CPERF_OPTYPE, required_argument, 0, 0 },
@@ -674,6 +754,8 @@ static struct option lgopts[] = {
{ CPERF_CSV, no_argument, 0, 0},
+ { CPERF_PMDCC_DELAY_MS, required_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -684,6 +766,7 @@ cperf_options_default(struct cperf_options *opts)
opts->pool_sz = 8192;
opts->total_ops = 10000000;
+ opts->nb_descriptors = 2048;
opts->buffer_size_list[0] = 64;
opts->buffer_size_count = 1;
@@ -697,10 +780,15 @@ cperf_options_default(struct cperf_options *opts)
opts->min_burst_size = 32;
opts->inc_burst_size = 0;
- opts->segments_nb = 1;
+ /*
+ * Will be parsed from the command line, or set later to
+ * the maximum buffer size plus the digest size
+ */
+ opts->segment_sz = 0;
strncpy(opts->device_type, "crypto_aesni_mb",
sizeof(opts->device_type));
+ opts->nb_qps = 1;
opts->op_type = CPERF_CIPHER_THEN_AUTH;
@@ -727,6 +815,8 @@ cperf_options_default(struct cperf_options *opts)
opts->aead_aad_sz = 0;
opts->digest_sz = 12;
+
+ opts->pmdcc_delay = 0;
}
static int
@@ -739,7 +829,8 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_TOTAL_OPS, parse_total_ops },
{ CPERF_BURST_SIZE, parse_burst_sz },
{ CPERF_BUFFER_SIZE, parse_buffer_sz },
- { CPERF_SEGMENTS_NB, parse_segments_nb },
+ { CPERF_SEGMENT_SIZE, parse_segment_sz },
+ { CPERF_DESC_NB, parse_desc_nb },
{ CPERF_DEVTYPE, parse_device_type },
{ CPERF_OPTYPE, parse_op_type },
{ CPERF_SESSIONLESS, parse_sessionless },
@@ -761,6 +852,7 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_AEAD_AAD_SZ, parse_aead_aad_sz },
{ CPERF_DIGEST_SZ, parse_digest_sz },
{ CPERF_CSV, parse_csv_friendly},
+ { CPERF_PMDCC_DELAY_MS, parse_pmd_cyclecount_delay_ms},
};
unsigned int i;
@@ -778,11 +870,14 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
{
int opt, retval, opt_idx;
- while ((opt = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
+ while ((opt = getopt_long(argc, argv, "h", lgopts, &opt_idx)) != EOF) {
switch (opt) {
+ case 'h':
+ usage(argv[0]);
+ rte_exit(EXIT_SUCCESS, "Displayed help\n");
+ break;
/* long options */
case 0:
-
retval = cperf_opts_parse_long(opt_idx, options);
if (retval != 0)
return retval;
@@ -790,6 +885,7 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
break;
default:
+ usage(argv[0]);
return -EINVAL;
}
}
@@ -830,14 +926,26 @@ check_cipher_buffer_length(struct cperf_options *options)
if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
- for (buffer_size = options->min_buffer_size;
- buffer_size < options->max_buffer_size;
- buffer_size += options->inc_buffer_size) {
+ if (options->inc_buffer_size != 0)
+ buffer_size = options->min_buffer_size;
+ else
+ buffer_size = options->buffer_size_list[0];
+
+ while (buffer_size <= options->max_buffer_size) {
if ((buffer_size % DES_BLOCK_SIZE) != 0) {
RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
"not suitable for the algorithm selected\n");
return -EINVAL;
}
+
+ if (options->inc_buffer_size != 0)
+ buffer_size += options->inc_buffer_size;
+ else {
+ if (++buffer_size_idx == options->buffer_size_count)
+ break;
+ buffer_size = options->buffer_size_list[buffer_size_idx];
+ }
+
}
}
@@ -847,9 +955,21 @@ check_cipher_buffer_length(struct cperf_options *options)
int
cperf_options_check(struct cperf_options *options)
{
- if (options->segments_nb > options->min_buffer_size) {
+ if (options->op_type == CPERF_CIPHER_ONLY)
+ options->digest_sz = 0;
+
+ /*
+ * If segment size is not set, assume only one segment,
+ * big enough to contain the largest buffer and the digest
+ */
+ if (options->segment_sz == 0)
+ options->segment_sz = options->max_buffer_size +
+ options->digest_sz;
+
+ if (options->segment_sz < options->digest_sz) {
RTE_LOG(ERR, USER1,
- "Segments number greater than buffer size.\n");
+ "Segment size should be at least "
+ "the size of the digest\n");
return -EINVAL;
}
@@ -882,13 +1002,6 @@ cperf_options_check(struct cperf_options *options)
}
if (options->test == CPERF_TEST_TYPE_VERIFY &&
- options->total_ops > options->pool_sz) {
- RTE_LOG(ERR, USER1, "Total number of ops must be less than or"
- " equal to the pool size.\n");
- return -EINVAL;
- }
-
- if (options->test == CPERF_TEST_TYPE_VERIFY &&
(options->inc_buffer_size != 0 ||
options->buffer_size_count > 1)) {
RTE_LOG(ERR, USER1, "Only one buffer size is allowed when "
@@ -904,6 +1017,14 @@ cperf_options_check(struct cperf_options *options)
return -EINVAL;
}
+ if (options->test == CPERF_TEST_TYPE_PMDCC &&
+ options->pool_sz < options->nb_descriptors) {
+ RTE_LOG(ERR, USER1, "For pmd cyclecount benchmarks, pool size "
+ "must be equal or greater than the number of "
+ "cryptodev descriptors.\n");
+ return -EINVAL;
+ }
+
if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
options->auth_op !=
@@ -965,13 +1086,16 @@ cperf_options_dump(struct cperf_options *opts)
printf("%u ", opts->burst_size_list[size_idx]);
printf("\n");
}
- printf("\n# segments per buffer: %u\n", opts->segments_nb);
+ printf("\n# segment size: %u\n", opts->segment_sz);
printf("#\n");
printf("# cryptodev type: %s\n", opts->device_type);
printf("#\n");
+ printf("# number of queue pairs per device: %u\n", opts->nb_qps);
printf("# crypto operation: %s\n", cperf_op_type_strs[opts->op_type]);
printf("# sessionless: %s\n", opts->sessionless ? "yes" : "no");
printf("# out of place: %s\n", opts->out_of_place ? "yes" : "no");
+ if (opts->test == CPERF_TEST_TYPE_PMDCC)
+ printf("# inter-burst delay: %u ms\n", opts->pmdcc_delay);
printf("#\n");
diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c
new file mode 100644
index 00000000..328744ef
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_common.c
@@ -0,0 +1,244 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "cperf_test_common.h"
+
+struct obj_params {
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+ uint16_t segment_sz;
+ uint16_t segments_nb;
+};
+
+static void
+fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
+{
+ uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
+
+ /* start of buffer is after mbuf structure and priv data */
+ m->priv_size = 0;
+ m->buf_addr = (char *)m + mbuf_hdr_size;
+ m->buf_iova = rte_mempool_virt2iova(obj) +
+ mbuf_offset + mbuf_hdr_size;
+ m->buf_len = segment_sz;
+ m->data_len = segment_sz;
+
+ /* No headroom needed for the buffer */
+ m->data_off = 0;
+
+ /* init some constant fields */
+ m->pool = mp;
+ m->nb_segs = 1;
+ m->port = 0xff;
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+}
+
+static void
+fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
+ uint16_t segments_nb)
+{
+ uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
+ uint16_t remaining_segments = segments_nb;
+ struct rte_mbuf *next_mbuf;
+ rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
+ mbuf_offset + mbuf_hdr_size;
+
+ do {
+ /* start of buffer is after mbuf structure and priv data */
+ m->priv_size = 0;
+ m->buf_addr = (char *)m + mbuf_hdr_size;
+ m->buf_iova = next_seg_phys_addr;
+ next_seg_phys_addr += mbuf_hdr_size + segment_sz;
+ m->buf_len = segment_sz;
+ m->data_len = segment_sz;
+
+ /* No headroom needed for the buffer */
+ m->data_off = 0;
+
+ /* init some constant fields */
+ m->pool = mp;
+ m->nb_segs = segments_nb;
+ m->port = 0xff;
+ rte_mbuf_refcnt_set(m, 1);
+ remaining_segments--;
+ /*
+ * Chain to the next segment while segments remain;
+ * terminate the chain at the last real segment instead
+ * of one mbuf header past the allocation.
+ */
+ if (remaining_segments > 0) {
+ next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
+ mbuf_hdr_size + segment_sz);
+ m->next = next_mbuf;
+ m = next_mbuf;
+ } else
+ m->next = NULL;
+
+ } while (remaining_segments > 0);
+}
+
+static void
+mempool_obj_init(struct rte_mempool *mp,
+ void *opaque_arg,
+ void *obj,
+ __attribute__((unused)) unsigned int i)
+{
+ struct obj_params *params = opaque_arg;
+ struct rte_crypto_op *op = obj;
+ struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
+ params->src_buf_offset);
+ /* Set crypto operation */
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->phys_addr = rte_mem_virt2phy(obj);
+ op->mempool = mp;
+
+ /* Set source buffer */
+ op->sym->m_src = m;
+ if (params->segments_nb == 1)
+ fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
+ params->segment_sz);
+ else
+ fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
+ params->segment_sz, params->segments_nb);
+
+
+ /* Set destination buffer */
+ if (params->dst_buf_offset) {
+ m = (struct rte_mbuf *) ((uint8_t *) obj +
+ params->dst_buf_offset);
+ fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
+ params->segment_sz);
+ op->sym->m_dst = m;
+ } else
+ op->sym->m_dst = NULL;
+}
+
+int
+cperf_alloc_common_memory(const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ uint8_t dev_id, uint16_t qp_id,
+ size_t extra_op_priv_size,
+ uint32_t *src_buf_offset,
+ uint32_t *dst_buf_offset,
+ struct rte_mempool **pool)
+{
+ char pool_name[32] = "";
+ int ret;
+
+ /* Calculate the object size */
+ uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+ uint16_t crypto_op_private_size;
+ /*
+ * If doing AES-CCM, the IV field needs to be 16 bytes long,
+ * and the AAD field needs room for 18 bytes plus the actual
+ * AAD, all rounded up to a multiple of 16 bytes.
+ */
+ if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ crypto_op_private_size = extra_op_priv_size +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
+ RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
+ } else {
+ crypto_op_private_size = extra_op_priv_size +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ test_vector->aead_iv.length +
+ options->aead_aad_sz;
+ }
+
+ uint16_t crypto_op_total_size = crypto_op_size +
+ crypto_op_private_size;
+ uint16_t crypto_op_total_size_padded =
+ RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
+ uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
+ uint32_t max_size = options->max_buffer_size + options->digest_sz;
+ uint16_t segments_nb = (max_size % options->segment_sz) ?
+ (max_size / options->segment_sz) + 1 :
+ max_size / options->segment_sz;
+ uint32_t obj_size = crypto_op_total_size_padded +
+ (mbuf_size * segments_nb);
+
+ snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
+ dev_id, qp_id);
+
+ *src_buf_offset = crypto_op_total_size_padded;
+
+ struct obj_params params = {
+ .segment_sz = options->segment_sz,
+ .segments_nb = segments_nb,
+ .src_buf_offset = crypto_op_total_size_padded,
+ .dst_buf_offset = 0
+ };
+
+ if (options->out_of_place) {
+ *dst_buf_offset = *src_buf_offset +
+ (mbuf_size * segments_nb);
+ params.dst_buf_offset = *dst_buf_offset;
+ /* Destination buffer will be one segment only */
+ obj_size += max_size;
+ }
+
+ *pool = rte_mempool_create_empty(pool_name,
+ options->pool_sz, obj_size, 512, 0,
+ rte_socket_id(), 0);
+ if (*pool == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Cannot allocate mempool for device %u\n",
+ dev_id);
+ return -1;
+ }
+
+ ret = rte_mempool_set_ops_byname(*pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+ if (ret != 0) {
+ RTE_LOG(ERR, USER1,
+ "Error setting mempool handler for device %u\n",
+ dev_id);
+ return -1;
+ }
+
+ ret = rte_mempool_populate_default(*pool);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Error populating mempool for device %u\n",
+ dev_id);
+ return -1;
+ }
+
+ rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);
+
+ return 0;
+}
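
To make the sizing arithmetic concrete, a worked example with assumed option values: with --buffer-sz 1024, --digest-sz 16 and --segment-sz 256,

    max_size    = 1024 + 16 = 1040    /* largest buffer + digest */
    segments_nb = 1040 / 256 = 4, remainder 16, so round up to 5
    obj_size    = padded op size + 5 * (sizeof(struct rte_mbuf) + 256)
                  + 1040 for the single-segment dst mbuf, if out-of-place

Each of the pool_sz objects is then initialized exactly once by mempool_obj_init() via rte_mempool_obj_iter(), so the runners never rebuild mbuf metadata on the hot path.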
diff --git a/app/test-crypto-perf/cperf_test_common.h b/app/test-crypto-perf/cperf_test_common.h
new file mode 100644
index 00000000..4cee7852
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_common.h
@@ -0,0 +1,52 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_TEST_COMMON_H_
+#define _CPERF_TEST_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mempool.h>
+
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+int
+cperf_alloc_common_memory(const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ uint8_t dev_id, uint16_t qp_id,
+ size_t extra_op_priv_size,
+ uint32_t *src_buf_offset,
+ uint32_t *dst_buf_offset,
+ struct rte_mempool **pool);
+
+#endif /* _CPERF_TEST_COMMON_H_ */
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 58b21abd..ca2a4ba6 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -37,7 +37,7 @@
#include "cperf_test_latency.h"
#include "cperf_ops.h"
-
+#include "cperf_test_common.h"
struct cperf_op_result {
uint64_t tsc_start;
@@ -50,17 +50,15 @@ struct cperf_latency_ctx {
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
struct cperf_op_result *res;
@@ -74,124 +72,22 @@ struct priv_op_data {
#define min(a, b) (a < b ? (uint64_t)a : (uint64_t)b)
static void
-cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
+cperf_latency_test_free(struct cperf_latency_ctx *ctx)
{
- uint32_t i;
-
if (ctx) {
if (ctx->sess) {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
- if (ctx->mbufs_in) {
- for (i = 0; i < mbuf_nb; i++)
- rte_pktmbuf_free(ctx->mbufs_in[i]);
-
- rte_free(ctx->mbufs_in);
- }
-
- if (ctx->mbufs_out) {
- for (i = 0; i < mbuf_nb; i++) {
- if (ctx->mbufs_out[i] != NULL)
- rte_pktmbuf_free(ctx->mbufs_out[i]);
- }
-
- rte_free(ctx->mbufs_out);
- }
-
- if (ctx->pkt_mbuf_pool_in)
- rte_mempool_free(ctx->pkt_mbuf_pool_in);
-
- if (ctx->pkt_mbuf_pool_out)
- rte_mempool_free(ctx->pkt_mbuf_pool_out);
-
- if (ctx->crypto_op_pool)
- rte_mempool_free(ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx->res);
rte_free(ctx);
}
}
-static struct rte_mbuf *
-cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
- const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
-{
- struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
- uint8_t *mbuf_data;
- uint8_t *test_data =
- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
- test_vector->plaintext.data :
- test_vector->ciphertext.data;
-
- mbuf = rte_pktmbuf_alloc(mempool);
- if (mbuf == NULL)
- goto error;
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
-
- while (segments_nb) {
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mempool);
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
- }
-
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, last_sz);
- }
-
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
- if (mbuf_data == NULL)
- goto error;
- }
-
- if (options->op_type == CPERF_AEAD) {
- uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
-
- if (aead == NULL)
- goto error;
-
- memcpy(aead, test_vector->aad.data, test_vector->aad.length);
- }
-
- return mbuf;
-error:
- if (mbuf != NULL)
- rte_pktmbuf_free(mbuf);
-
- return NULL;
-}
-
void *
cperf_latency_test_constructor(struct rte_mempool *sess_mp,
uint8_t dev_id, uint16_t qp_id,
@@ -200,8 +96,7 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_latency_ctx *ctx = NULL;
- unsigned int mbuf_idx = 0;
- char pool_name[32] = "";
+ size_t extra_op_priv_size = sizeof(struct priv_op_data);
ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
if (ctx == NULL)
@@ -224,80 +119,10 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
if (ctx->sess == NULL)
goto err;
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_in == NULL)
- goto err;
-
- /* Generate mbufs_in with plaintext populated for test */
- ctx->mbufs_in = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
- options, test_vector);
- if (ctx->mbufs_in[mbuf_idx] == NULL)
- goto err;
- }
-
- if (options->out_of_place == 1) {
-
- snprintf(pool_name, sizeof(pool_name),
- "cperf_pool_out_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
- pool_name, options->pool_sz, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_out == NULL)
- goto err;
- }
-
- ctx->mbufs_out = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- if (options->out_of_place == 1) {
- ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
- if (ctx->mbufs_out[mbuf_idx] == NULL)
- goto err;
- } else {
- ctx->mbufs_out[mbuf_idx] = NULL;
- }
- }
-
- snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
- dev_id);
-
- uint16_t priv_size = sizeof(struct priv_op_data) +
- test_vector->cipher_iv.length +
- test_vector->auth_iv.length +
- test_vector->aead_iv.length;
- ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
- 512, priv_size, rte_socket_id());
-
- if (ctx->crypto_op_pool == NULL)
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id,
+ extra_op_priv_size,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
@@ -308,7 +133,7 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
return ctx;
err:
- cperf_latency_test_free(ctx, mbuf_idx);
+ cperf_latency_test_free(ctx);
return NULL;
}
@@ -347,7 +172,7 @@ cperf_latency_test_runner(void *arg)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
@@ -373,7 +198,7 @@ cperf_latency_test_runner(void *arg)
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
- uint64_t m_idx = 0, b_idx = 0;
+ uint64_t b_idx = 0;
uint64_t tsc_val, tsc_end, tsc_start;
uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
@@ -388,11 +213,9 @@ cperf_latency_test_runner(void *arg)
ctx->options->total_ops -
enqd_tot;
- /* Allocate crypto ops from pool */
- if (burst_size != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ burst_size) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
@@ -402,8 +225,8 @@ cperf_latency_test_runner(void *arg)
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
burst_size, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
@@ -432,7 +255,7 @@ cperf_latency_test_runner(void *arg)
/* Free memory for not enqueued operations */
if (ops_enqd != burst_size)
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)&ops[ops_enqd],
burst_size - ops_enqd);
@@ -448,16 +271,11 @@ cperf_latency_test_runner(void *arg)
}
if (likely(ops_deqd)) {
- /*
- * free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
+ /* Free crypto ops so they can be reused. */
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
@@ -469,9 +287,6 @@ cperf_latency_test_runner(void *arg)
enqd_max = max(ops_enqd, enqd_max);
enqd_min = min(ops_enqd, enqd_min);
- m_idx += ops_enqd;
- m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
- 0 : m_idx;
b_idx++;
}
@@ -490,7 +305,7 @@ cperf_latency_test_runner(void *arg)
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
@@ -586,7 +401,5 @@ cperf_latency_test_destructor(void *arg)
if (ctx == NULL)
return;
- rte_cryptodev_stop(ctx->dev_id);
-
- cperf_latency_test_free(ctx, ctx->options->pool_sz);
+ cperf_latency_test_free(ctx);
}
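
The latency runner's hot loop is correspondingly simpler: ops arrive preinitialized from the combined pool and are recycled with a bulk put once their timestamps have been stored. A condensed sketch of the per-burst cycle, using only calls that appear in the diff above:

    if (rte_mempool_get_bulk(ctx->pool, (void **)ops, burst_size) != 0)
            return -1; /* raise --pool-sz if this triggers */
    (ctx->populate_ops)(ops, ctx->src_buf_offset, ctx->dst_buf_offset,
            burst_size, ctx->sess, ctx->options, ctx->test_vector,
            iv_offset);
    ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
            ops, burst_size);
    /* ... after rte_cryptodev_dequeue_burst() fills ops_processed ... */
    rte_mempool_put_bulk(ctx->pool, (void **)ops_processed, ops_deqd);

The new pmd-cyclecount test that follows reuses the same get/populate/put pattern, but times the build, enqueue and dequeue phases separately.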
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
new file mode 100644
index 00000000..9b41724a
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -0,0 +1,520 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "cperf_ops.h"
+#include "cperf_test_pmd_cyclecount.h"
+#include "cperf_test_common.h"
+
+#define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n"
+#define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n"
+#define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
+#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.3f;%.3f;%.3f\n"
+
+struct cperf_pmd_cyclecount_ctx {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint8_t lcore_id;
+
+ struct rte_mempool *pool;
+ struct rte_crypto_op **ops;
+ struct rte_crypto_op **ops_processed;
+
+ struct rte_cryptodev_sym_session *sess;
+
+ cperf_populate_ops_t populate_ops;
+
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
+ const struct cperf_options *options;
+ const struct cperf_test_vector *test_vector;
+};
+
+struct pmd_cyclecount_state {
+ struct cperf_pmd_cyclecount_ctx *ctx;
+ const struct cperf_options *opts;
+ uint32_t lcore;
+ uint64_t delay;
+ int linearize;
+ uint32_t ops_enqd;
+ uint32_t ops_deqd;
+ uint32_t ops_enq_retries;
+ uint32_t ops_deq_retries;
+ double cycles_per_build;
+ double cycles_per_enq;
+ double cycles_per_deq;
+};
+
+static const uint16_t iv_offset =
+ sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op);
+
+static void
+cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
+{
+ if (ctx) {
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
+
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
+
+ if (ctx->ops)
+ rte_free(ctx->ops);
+
+ if (ctx->ops_processed)
+ rte_free(ctx->ops_processed);
+
+ rte_free(ctx);
+ }
+}
+
+void *
+cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *op_fns)
+{
+ struct cperf_pmd_cyclecount_ctx *ctx = NULL;
+
+ /* preallocate buffers for crypto ops as they can get quite big */
+ size_t alloc_sz = sizeof(struct rte_crypto_op *) *
+ options->nb_descriptors;
+
+ ctx = rte_malloc(NULL, sizeof(struct cperf_pmd_cyclecount_ctx), 0);
+ if (ctx == NULL)
+ goto err;
+
+ ctx->dev_id = dev_id;
+ ctx->qp_id = qp_id;
+
+ ctx->populate_ops = op_fns->populate_ops;
+ ctx->options = options;
+ ctx->test_vector = test_vector;
+
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(
+ sess_mp, dev_id, options, test_vector, iv_offset);
+ if (ctx->sess == NULL)
+ goto err;
+
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
+ goto err;
+
+ ctx->ops = rte_malloc("ops", alloc_sz, 0);
+ if (!ctx->ops)
+ goto err;
+
+ ctx->ops_processed = rte_malloc("ops_processed", alloc_sz, 0);
+ if (!ctx->ops_processed)
+ goto err;
+
+ return ctx;
+
+err:
+ cperf_pmd_cyclecount_test_free(ctx);
+
+ return NULL;
+}
+
+/* benchmark alloc-build-free of ops */
+static inline int
+pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
+ uint16_t test_burst_size)
+{
+ uint32_t iter_ops_left = state->opts->total_ops - cur_op;
+ uint32_t iter_ops_needed =
+ RTE_MIN(state->opts->nb_descriptors, iter_ops_left);
+ uint32_t cur_iter_op;
+
+ for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
+ cur_iter_op += test_burst_size) {
+ uint32_t burst_size = RTE_MIN(state->opts->total_ops - cur_op,
+ test_burst_size);
+ struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
+
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
+ burst_size) != 0) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
+ return -1;
+ }
+
+ /* Setup crypto op, attach mbuf etc */
+ (state->ctx->populate_ops)(ops,
+ state->ctx->src_buf_offset,
+ state->ctx->dst_buf_offset,
+ burst_size,
+ state->ctx->sess, state->opts,
+ state->ctx->test_vector, iv_offset);
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ /* Check if source mbufs require coalescing */
+ if (state->linearize) {
+ uint8_t i;
+ for (i = 0; i < burst_size; i++) {
+ struct rte_mbuf *src = ops[i]->sym->m_src;
+ rte_pktmbuf_linearize(src);
+ }
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+ rte_mempool_put_bulk(state->ctx->pool, (void **)ops,
+ burst_size);
+ }
+
+ return 0;
+}
+
+/* allocate and build ops (no free) */
+static int
+pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
+ uint32_t iter_ops_needed, uint16_t test_burst_size)
+{
+ uint32_t cur_iter_op;
+
+ for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
+ cur_iter_op += test_burst_size) {
+ uint32_t burst_size = RTE_MIN(
+ iter_ops_needed - cur_iter_op, test_burst_size);
+ struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
+
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
+ burst_size) != 0) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
+ return -1;
+ }
+
+ /* Setup crypto op, attach mbuf etc */
+ (state->ctx->populate_ops)(ops,
+ state->ctx->src_buf_offset,
+ state->ctx->dst_buf_offset,
+ burst_size,
+ state->ctx->sess, state->opts,
+ state->ctx->test_vector, iv_offset);
+ }
+ return 0;
+}
+
+/* benchmark enqueue, returns number of ops enqueued */
+static uint32_t
+pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state,
+ uint32_t iter_ops_needed, uint16_t test_burst_size)
+{
+ /* Enqueue full descriptor ring of ops on crypto device */
+ uint32_t cur_iter_op = 0;
+ while (cur_iter_op < iter_ops_needed) {
+ uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
+ test_burst_size);
+ struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
+ uint32_t burst_enqd;
+
+ burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id,
+ state->ctx->qp_id, ops, burst_size);
+
+ /* if we couldn't enqueue anything, the queue is full */
+ if (!burst_enqd) {
+ /* don't try to dequeue anything we didn't enqueue */
+ return cur_iter_op;
+ }
+
+ if (burst_enqd < burst_size)
+ state->ops_enq_retries++;
+ state->ops_enqd += burst_enqd;
+ cur_iter_op += burst_enqd;
+ }
+ return iter_ops_needed;
+}
+
+/* benchmark dequeue */
+static void
+pmd_cyclecount_bench_deq(struct pmd_cyclecount_state *state,
+ uint32_t iter_ops_needed, uint16_t test_burst_size)
+{
+ /* Dequeue full descriptor ring of ops on crypto device */
+ uint32_t cur_iter_op = 0;
+ while (cur_iter_op < iter_ops_needed) {
+ uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
+ test_burst_size);
+ struct rte_crypto_op **ops_processed =
+ &state->ctx->ops[cur_iter_op];
+ uint32_t burst_deqd;
+
+ burst_deqd = rte_cryptodev_dequeue_burst(state->ctx->dev_id,
+ state->ctx->qp_id, ops_processed, burst_size);
+
+ if (burst_deqd < burst_size)
+ state->ops_deq_retries++;
+ state->ops_deqd += burst_deqd;
+ cur_iter_op += burst_deqd;
+ }
+}
+
+/* run benchmark per burst size */
+static inline int
+pmd_cyclecount_bench_burst_sz(
+ struct pmd_cyclecount_state *state, uint16_t test_burst_size)
+{
+ uint64_t tsc_start;
+ uint64_t tsc_end;
+ uint64_t tsc_op;
+ uint64_t tsc_enq;
+ uint64_t tsc_deq;
+ uint32_t cur_op;
+
+ /* reset all counters */
+ tsc_enq = 0;
+ tsc_deq = 0;
+ state->ops_enqd = 0;
+ state->ops_enq_retries = 0;
+ state->ops_deqd = 0;
+ state->ops_deq_retries = 0;
+
+ /*
+ * Benchmark crypto op alloc-build-free separately.
+ */
+ tsc_start = rte_rdtsc_precise();
+
+ for (cur_op = 0; cur_op < state->opts->total_ops;
+ cur_op += state->opts->nb_descriptors) {
+ if (unlikely(pmd_cyclecount_bench_ops(
+ state, cur_op, test_burst_size)))
+ return -1;
+ }
+
+ tsc_end = rte_rdtsc_precise();
+ tsc_op = tsc_end - tsc_start;
+
+
+ /*
+ * Hardware acceleration cyclecount benchmarking loop.
+ *
+ * We're benchmarking raw enq/deq performance by filling up the device
+ * queue, so we never get any failed enqs unless the driver won't accept
+ * the exact number of descriptors we requested, or the driver won't
+ * wrap around the end of the TX ring. However, since we're only
+ * dequeueing once we've filled up the queue, we have to benchmark it
+ * piecemeal and then average out the results.
+ */
+ cur_op = 0;
+ while (cur_op < state->opts->total_ops) {
+ uint32_t iter_ops_left = state->opts->total_ops - cur_op;
+ uint32_t iter_ops_needed = RTE_MIN(
+ state->opts->nb_descriptors, iter_ops_left);
+ uint32_t iter_ops_allocd = iter_ops_needed;
+
+ /* allocate and build ops */
+ if (unlikely(pmd_cyclecount_build_ops(state, iter_ops_needed,
+ test_burst_size)))
+ return -1;
+
+ tsc_start = rte_rdtsc_precise();
+
+ /* fill up TX ring */
+ iter_ops_needed = pmd_cyclecount_bench_enq(state,
+ iter_ops_needed, test_burst_size);
+
+ tsc_end = rte_rdtsc_precise();
+
+ tsc_enq += tsc_end - tsc_start;
+
+ /* allow for HW to catch up */
+ if (state->delay)
+ rte_delay_us_block(state->delay);
+
+ tsc_start = rte_rdtsc_precise();
+
+ /* drain RX ring */
+ pmd_cyclecount_bench_deq(state, iter_ops_needed,
+ test_burst_size);
+
+ tsc_end = rte_rdtsc_precise();
+
+ tsc_deq += tsc_end - tsc_start;
+
+ cur_op += iter_ops_needed;
+
+ /*
+ * we may not have processed all ops that we allocated, so
+ * free everything we've allocated.
+ */
+ rte_mempool_put_bulk(state->ctx->pool,
+ (void **)state->ctx->ops, iter_ops_allocd);
+ }
+
+ state->cycles_per_build = (double)tsc_op / state->opts->total_ops;
+ state->cycles_per_enq = (double)tsc_enq / state->ops_enqd;
+ state->cycles_per_deq = (double)tsc_deq / state->ops_deqd;
+
+ return 0;
+}
+
+int
+cperf_pmd_cyclecount_test_runner(void *test_ctx)
+{
+ struct pmd_cyclecount_state state = {0};
+ const struct cperf_options *opts;
+ uint16_t test_burst_size;
+ uint8_t burst_size_idx = 0;
+
+ state.ctx = test_ctx;
+ opts = state.ctx->options;
+ state.opts = opts;
+ state.lcore = rte_lcore_id();
+ state.linearize = 0;
+
+ static int only_once;
+ static bool warmup = true;
+
+ /*
+ * We need a small delay to allow for hardware to process all the crypto
+ * operations. We can't automatically figure out what the delay should
+ * be, so we leave it up to the user (by default it's 0).
+ */
+ state.delay = 1000 * opts->pmdcc_delay;
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ struct rte_cryptodev_info dev_info;
+
+ /* Check if source mbufs require coalescing */
+ if (opts->segment_sz < opts->max_buffer_size) {
+ rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
+ 0) {
+ state.linearize = 1;
+ }
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ state.ctx->lcore_id = state.lcore;
+
+ /* Get first size from range or list */
+ if (opts->inc_burst_size != 0)
+ test_burst_size = opts->min_burst_size;
+ else
+ test_burst_size = opts->burst_size_list[0];
+
+ while (test_burst_size <= opts->max_burst_size) {
+ /* do a benchmark run */
+ if (pmd_cyclecount_bench_burst_sz(&state, test_burst_size))
+ return -1;
+
+ /*
+ * The first run is always a warm-up run.
+ */
+ if (warmup) {
+ warmup = false;
+ continue;
+ }
+
+ if (!opts->csv) {
+ if (!only_once)
+ printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
+ "Burst Size", "Enqueued",
+ "Dequeued", "Enq Retries",
+ "Deq Retries", "Cycles/Op",
+ "Cycles/Enq", "Cycles/Deq");
+ only_once = 1;
+
+ printf(PRETTY_LINE_FMT, state.ctx->lcore_id,
+ opts->test_buffer_size, test_burst_size,
+ state.ops_enqd, state.ops_deqd,
+ state.ops_enq_retries,
+ state.ops_deq_retries,
+ state.cycles_per_build,
+ state.cycles_per_enq,
+ state.cycles_per_deq);
+ } else {
+ if (!only_once)
+ printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
+ "Burst Size", "Enqueued",
+ "Dequeued", "Enq Retries",
+ "Deq Retries", "Cycles/Op",
+ "Cycles/Enq", "Cycles/Deq");
+ only_once = 1;
+
+ printf(CSV_LINE_FMT, state.ctx->lcore_id,
+ opts->test_buffer_size, test_burst_size,
+ state.ops_enqd, state.ops_deqd,
+ state.ops_enq_retries,
+ state.ops_deq_retries,
+ state.cycles_per_build,
+ state.cycles_per_enq,
+ state.cycles_per_deq);
+ }
+
+ /* Get next size from range or list */
+ if (opts->inc_burst_size != 0)
+ test_burst_size += opts->inc_burst_size;
+ else {
+ if (++burst_size_idx == opts->burst_size_count)
+ break;
+ test_burst_size = opts->burst_size_list[burst_size_idx];
+ }
+ }
+
+ return 0;
+}
+
+void
+cperf_pmd_cyclecount_test_destructor(void *arg)
+{
+ struct cperf_pmd_cyclecount_ctx *ctx = arg;
+
+ if (ctx == NULL)
+ return;
+
+ cperf_pmd_cyclecount_test_free(ctx);
+}
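The runner above accumulates separate TSC deltas for op building, enqueue and dequeue, then divides each by the number of ops that went through that phase. A standalone sketch of the same cycles-per-operation averaging, using plain __rdtsc() in place of rte_rdtsc_precise() and a hypothetical do_work() standing in for the timed phases:

/* Standalone sketch of the cycles-per-op averaging used above.
 * Assumes an x86 TSC; do_work() is a hypothetical stand-in for the
 * build/enqueue/dequeue phases that the real code times.
 */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

static void do_work(volatile uint64_t *acc, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		*acc += i;	/* placeholder for one operation */
}

int main(void)
{
	const uint32_t total_ops = 1u << 20;
	volatile uint64_t acc = 0;
	uint64_t tsc_start, tsc_end;

	tsc_start = __rdtsc();
	do_work(&acc, total_ops);
	tsc_end = __rdtsc();

	/* Same math as cycles_per_build/enq/deq: total cycles / ops */
	printf("cycles/op: %.2f\n",
			(double)(tsc_end - tsc_start) / total_ops);
	return 0;
}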
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.h b/app/test-crypto-perf/cperf_test_pmd_cyclecount.h
new file mode 100644
index 00000000..93f0eae0
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.h
@@ -0,0 +1,61 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_TEST_PMD_CYCLECOUNT_H_
+#define _CPERF_TEST_PMD_CYCLECOUNT_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "cperf.h"
+#include "cperf_ops.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+void *
+cperf_pmd_cyclecount_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *ops_fn);
+
+int
+cperf_pmd_cyclecount_test_runner(void *test_ctx);
+
+void
+cperf_pmd_cyclecount_test_destructor(void *test_ctx);
+
+#endif /* _CPERF_TEST_PMD_CYCLECOUNT_H_ */
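The header completes the constructor/runner/destructor triple that main.c installs into cperf_testmap. A reduced sketch of that dispatch-table pattern, with illustrative names rather than the real cperf types:

/* Reduced sketch of the constructor/runner/destructor dispatch table
 * built in main.c from headers like this one. Names are illustrative.
 */
#include <stdio.h>

struct test_ops {
	void *(*constructor)(void);
	int (*runner)(void *ctx);
	void (*destructor)(void *ctx);
};

static void *demo_ctor(void) { static int ctx; return &ctx; }
static int demo_run(void *ctx) { (void)ctx; puts("running"); return 0; }
static void demo_dtor(void *ctx) { (void)ctx; }

static const struct test_ops testmap[] = {
	{ demo_ctor, demo_run, demo_dtor },	/* e.g. "pmd-cyclecount" */
};

int main(void)
{
	void *ctx = testmap[0].constructor();
	int ret = testmap[0].runner(ctx);

	testmap[0].destructor(ctx);
	return ret;
}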
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 3bb1cb05..b84dc630 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -37,145 +37,42 @@
#include "cperf_test_throughput.h"
#include "cperf_ops.h"
+#include "cperf_test_common.h"
struct cperf_throughput_ctx {
uint8_t dev_id;
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
static void
-cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
+cperf_throughput_test_free(struct cperf_throughput_ctx *ctx)
{
- uint32_t i;
-
if (ctx) {
if (ctx->sess) {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
- if (ctx->mbufs_in) {
- for (i = 0; i < mbuf_nb; i++)
- rte_pktmbuf_free(ctx->mbufs_in[i]);
-
- rte_free(ctx->mbufs_in);
- }
-
- if (ctx->mbufs_out) {
- for (i = 0; i < mbuf_nb; i++) {
- if (ctx->mbufs_out[i] != NULL)
- rte_pktmbuf_free(ctx->mbufs_out[i]);
- }
-
- rte_free(ctx->mbufs_out);
- }
-
- if (ctx->pkt_mbuf_pool_in)
- rte_mempool_free(ctx->pkt_mbuf_pool_in);
-
- if (ctx->pkt_mbuf_pool_out)
- rte_mempool_free(ctx->pkt_mbuf_pool_out);
-
- if (ctx->crypto_op_pool)
- rte_mempool_free(ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx);
}
}
-static struct rte_mbuf *
-cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
- const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
-{
- struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
- uint8_t *mbuf_data;
- uint8_t *test_data =
- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
- test_vector->plaintext.data :
- test_vector->ciphertext.data;
-
- mbuf = rte_pktmbuf_alloc(mempool);
- if (mbuf == NULL)
- goto error;
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
-
- while (segments_nb) {
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mempool);
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
- }
-
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, last_sz);
- }
-
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
- if (mbuf_data == NULL)
- goto error;
- }
-
- if (options->op_type == CPERF_AEAD) {
- uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
-
- if (aead == NULL)
- goto error;
-
- memcpy(aead, test_vector->aad.data, test_vector->aad.length);
- }
-
- return mbuf;
-error:
- if (mbuf != NULL)
- rte_pktmbuf_free(mbuf);
-
- return NULL;
-}
-
void *
cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
uint8_t dev_id, uint16_t qp_id,
@@ -184,8 +81,6 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_throughput_ctx *ctx = NULL;
- unsigned int mbuf_idx = 0;
- char pool_name[32] = "";
ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0);
if (ctx == NULL)
@@ -198,7 +93,7 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the cryptop operation */
+ /* IV goes at the end of the crypto operation */
uint16_t iv_offset = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
@@ -207,81 +102,14 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
if (ctx->sess == NULL)
goto err;
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_in == NULL)
- goto err;
-
- /* Generate mbufs_in with plaintext populated for test */
- ctx->mbufs_in = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
- options, test_vector);
- if (ctx->mbufs_in[mbuf_idx] == NULL)
- goto err;
- }
-
- if (options->out_of_place == 1) {
-
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
- pool_name, options->pool_sz, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_out == NULL)
- goto err;
- }
-
- ctx->mbufs_out = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- if (options->out_of_place == 1) {
- ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
- if (ctx->mbufs_out[mbuf_idx] == NULL)
- goto err;
- } else {
- ctx->mbufs_out[mbuf_idx] = NULL;
- }
- }
-
- snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
- dev_id);
-
- uint16_t priv_size = test_vector->cipher_iv.length +
- test_vector->auth_iv.length + test_vector->aead_iv.length;
-
- ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
- 512, priv_size, rte_socket_id());
- if (ctx->crypto_op_pool == NULL)
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
return ctx;
err:
- cperf_throughput_test_free(ctx, mbuf_idx);
+ cperf_throughput_test_free(ctx);
return NULL;
}
@@ -306,7 +134,7 @@ cperf_throughput_test_runner(void *test_ctx)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
@@ -333,7 +161,7 @@ cperf_throughput_test_runner(void *test_ctx)
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
- uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
+ uint64_t tsc_start, tsc_end, tsc_duration;
uint16_t ops_unused = 0;
@@ -349,11 +177,9 @@ cperf_throughput_test_runner(void *test_ctx)
uint16_t ops_needed = burst_size - ops_unused;
- /* Allocate crypto ops from pool */
- if (ops_needed != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
@@ -363,10 +189,11 @@ cperf_throughput_test_runner(void *test_ctx)
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
- ops_needed, ctx->sess, ctx->options,
- ctx->test_vector, iv_offset);
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
+ ops_needed, ctx->sess,
+ ctx->options, ctx->test_vector,
+ iv_offset);
/**
* When ops_needed is smaller than ops_enqd, the
@@ -411,12 +238,8 @@ cperf_throughput_test_runner(void *test_ctx)
ops_processed, test_burst_size);
if (likely(ops_deqd)) {
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
@@ -429,9 +252,6 @@ cperf_throughput_test_runner(void *test_ctx)
ops_deqd_failed++;
}
- m_idx += ops_needed;
- m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
- 0 : m_idx;
}
/* Dequeue any operations still in the crypto device */
@@ -446,9 +266,8 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_deqd == 0)
ops_deqd_failed++;
else {
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
-
ops_deqd_total += ops_deqd;
}
}
@@ -534,7 +353,5 @@ cperf_throughput_test_destructor(void *arg)
if (ctx == NULL)
return;
- rte_cryptodev_stop(ctx->dev_id);
-
- cperf_throughput_test_free(ctx, ctx->options->pool_sz);
+ cperf_throughput_test_free(ctx);
}
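The linearization check above no longer counts segments: with a fixed segment_sz, the source mbuf chain is multi-segment exactly when segment_sz < max_buffer_size. A small sketch of the equivalent ceiling division (variable names are illustrative, not taken from cperf):

/* Sketch: number of segments implied by a fixed segment size. */
#include <stdint.h>
#include <stdio.h>

static uint32_t nb_segments(uint32_t max_buffer_size, uint32_t segment_sz)
{
	/* ceil(max_buffer_size / segment_sz) */
	return (max_buffer_size + segment_sz - 1) / segment_sz;
}

int main(void)
{
	uint32_t segment_sz = 512, max_buffer_size = 2048;

	printf("segments: %u, coalescing needed without SGL: %s\n",
			nb_segments(max_buffer_size, segment_sz),
			segment_sz < max_buffer_size ? "yes" : "no");
	return 0;
}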
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index 148a6041..d4736f9e 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -116,6 +116,20 @@ show_test_vector(struct cperf_test_vector *test_vector)
printf("\n");
}
+ if (test_vector->aead_key.data) {
+ printf("\naead_key =\n");
+ for (i = 0; i < test_vector->aead_key.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->aead_key.length - 1))
+ printf("0x%02x", test_vector->aead_key.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->aead_key.data[i]);
+ }
+ printf("\n");
+ }
+
if (test_vector->cipher_iv.data) {
printf("\ncipher_iv =\n");
for (i = 0; i < test_vector->cipher_iv.length; ++i) {
@@ -142,6 +156,19 @@ show_test_vector(struct cperf_test_vector *test_vector)
printf("\n");
}
+ if (test_vector->aead_iv.data) {
+ printf("\naead_iv =\n");
+ for (i = 0; i < test_vector->aead_iv.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->aead_iv.length - 1))
+ printf("0x%02x", test_vector->aead_iv.data[i]);
+ else
+ printf("0x%02x, ", test_vector->aead_iv.data[i]);
+ }
+ printf("\n");
+ }
+
if (test_vector->ciphertext.data) {
printf("\nciphertext =\n");
for (i = 0; i < test_vector->ciphertext.length; ++i) {
@@ -345,6 +372,20 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
vector->auth_key.length = opts->auth_key_sz;
}
+ } else if (strstr(key_token, "aead_key")) {
+ rte_free(vector->aead_key.data);
+ vector->aead_key.data = data;
+ if (tc_found)
+ vector->aead_key.length = data_length;
+ else {
+ if (opts->aead_key_sz > data_length) {
+ printf("Global aead_key shorter than "
+ "aead_key_sz\n");
+ return -1;
+ }
+ vector->aead_key.length = opts->aead_key_sz;
+ }
+
} else if (strstr(key_token, "cipher_iv")) {
rte_free(vector->cipher_iv.data);
vector->cipher_iv.data = data;
@@ -373,6 +414,20 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
vector->auth_iv.length = opts->auth_iv_sz;
}
+ } else if (strstr(key_token, "aead_iv")) {
+ rte_free(vector->aead_iv.data);
+ vector->aead_iv.data = data;
+ if (tc_found)
+ vector->aead_iv.length = data_length;
+ else {
+ if (opts->aead_iv_sz > data_length) {
+ printf("Global aead iv shorter than "
+ "aead_iv_sz\n");
+ return -1;
+ }
+ vector->aead_iv.length = opts->aead_iv_sz;
+ }
+
} else if (strstr(key_token, "ciphertext")) {
rte_free(vector->ciphertext.data);
vector->ciphertext.data = data;
@@ -390,7 +445,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "aad")) {
rte_free(vector->aad.data);
vector->aad.data = data;
- vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+ vector->aad.phys_addr = rte_malloc_virt2iova(vector->aad.data);
if (tc_found)
vector->aad.length = data_length;
else {
@@ -405,7 +460,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "digest")) {
rte_free(vector->digest.data);
vector->digest.data = data;
- vector->digest.phys_addr = rte_malloc_virt2phy(
+ vector->digest.phys_addr = rte_malloc_virt2iova(
vector->digest.data);
if (tc_found)
vector->digest.length = data_length;
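parse_entry() dispatches on key tokens with strstr() and either trusts the parsed data length (when a test case was found) or falls back to the size configured in the options. A stripped-down sketch of the token matching, with hypothetical fields standing in for the real vector struct:

/* Stripped-down sketch of the strstr()-based key dispatch in
 * parse_entry(). The struct and keys are illustrative only.
 */
#include <stdio.h>
#include <string.h>

struct vec { const char *aead_key; const char *aead_iv; };

static int parse_kv(const char *key_token, const char *value, struct vec *v)
{
	if (strstr(key_token, "aead_key"))
		v->aead_key = value;
	else if (strstr(key_token, "aead_iv"))
		v->aead_iv = value;
	else
		return -1;	/* unknown key */
	return 0;
}

int main(void)
{
	struct vec v = { NULL, NULL };

	parse_kv("aead_key", "0xde, 0xad", &v);
	parse_kv("aead_iv", "0x00, 0x01", &v);
	printf("key=%s iv=%s\n", v.aead_key, v.aead_iv);
	return 0;
}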
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index e51dcc3f..fa911ff6 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -498,7 +498,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest,
options->digest_sz);
@@ -531,7 +531,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
- t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.phys_addr = rte_malloc_virt2iova(t_vec->aad.data);
t_vec->aad.length = options->aead_aad_sz;
} else {
t_vec->aad.data = NULL;
@@ -546,7 +546,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest, options->digest_sz);
t_vec->data.aead_offset = 0;
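rte_malloc_virt2phy() became rte_malloc_virt2iova() in the 17.11 IOVA rework; the usage pattern is unchanged: allocate with rte_malloc(), then resolve the device-visible address for the DMA descriptor. A minimal sketch, assuming an already-initialized DPDK 17.11+ EAL and with error handling trimmed:

/* Minimal sketch of the virt-to-IOVA lookup, assuming rte_eal_init()
 * has already run (DPDK 17.11+).
 */
#include <stdint.h>

#include <rte_malloc.h>
#include <rte_memory.h>

static rte_iova_t alloc_digest(uint16_t digest_sz, uint8_t **out)
{
	uint8_t *data = rte_malloc(NULL, digest_sz, 16);

	if (data == NULL)
		return 0;
	*out = data;
	/* Bus address the crypto device can DMA to */
	return rte_malloc_virt2iova(data);
}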
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
index 85955703..cb5d8284 100644
--- a/app/test-crypto-perf/cperf_test_vectors.h
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -78,13 +78,13 @@ struct cperf_test_vector {
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint16_t length;
} aad;
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint16_t length;
} digest;
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index a314646c..6945c8b4 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -37,23 +37,22 @@
#include "cperf_test_verify.h"
#include "cperf_ops.h"
+#include "cperf_test_common.h"
struct cperf_verify_ctx {
uint8_t dev_id;
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
@@ -63,123 +62,21 @@ struct cperf_op_result {
};
static void
-cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
+cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
- uint32_t i;
-
if (ctx) {
if (ctx->sess) {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
- if (ctx->mbufs_in) {
- for (i = 0; i < mbuf_nb; i++)
- rte_pktmbuf_free(ctx->mbufs_in[i]);
-
- rte_free(ctx->mbufs_in);
- }
-
- if (ctx->mbufs_out) {
- for (i = 0; i < mbuf_nb; i++) {
- if (ctx->mbufs_out[i] != NULL)
- rte_pktmbuf_free(ctx->mbufs_out[i]);
- }
-
- rte_free(ctx->mbufs_out);
- }
-
- if (ctx->pkt_mbuf_pool_in)
- rte_mempool_free(ctx->pkt_mbuf_pool_in);
-
- if (ctx->pkt_mbuf_pool_out)
- rte_mempool_free(ctx->pkt_mbuf_pool_out);
-
- if (ctx->crypto_op_pool)
- rte_mempool_free(ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx);
}
}
-static struct rte_mbuf *
-cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
- const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
-{
- struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
- uint8_t *mbuf_data;
- uint8_t *test_data =
- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
- test_vector->plaintext.data :
- test_vector->ciphertext.data;
-
- mbuf = rte_pktmbuf_alloc(mempool);
- if (mbuf == NULL)
- goto error;
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
-
- while (segments_nb) {
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mempool);
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
- }
-
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, last_sz);
- }
-
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
- if (mbuf_data == NULL)
- goto error;
- }
-
- if (options->op_type == CPERF_AEAD) {
- uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
-
- if (aead == NULL)
- goto error;
-
- memcpy(aead, test_vector->aad.data, test_vector->aad.length);
- }
-
- return mbuf;
-error:
- if (mbuf != NULL)
- rte_pktmbuf_free(mbuf);
-
- return NULL;
-}
-
void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
uint8_t dev_id, uint16_t qp_id,
@@ -188,8 +85,6 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_verify_ctx *ctx = NULL;
- unsigned int mbuf_idx = 0;
- char pool_name[32] = "";
ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
if (ctx == NULL)
@@ -202,7 +97,7 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the cryptop operation */
+ /* IV goes at the end of the crypto operation */
uint16_t iv_offset = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
@@ -211,80 +106,14 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
if (ctx->sess == NULL)
goto err;
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_in == NULL)
- goto err;
-
- /* Generate mbufs_in with plaintext populated for test */
- ctx->mbufs_in = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
- options, test_vector);
- if (ctx->mbufs_in[mbuf_idx] == NULL)
- goto err;
- }
-
- if (options->out_of_place == 1) {
-
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
- pool_name, options->pool_sz, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_out == NULL)
- goto err;
- }
-
- ctx->mbufs_out = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- if (options->out_of_place == 1) {
- ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
- if (ctx->mbufs_out[mbuf_idx] == NULL)
- goto err;
- } else {
- ctx->mbufs_out[mbuf_idx] = NULL;
- }
- }
-
- snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
- dev_id);
-
- uint16_t priv_size = test_vector->cipher_iv.length +
- test_vector->auth_iv.length + test_vector->aead_iv.length;
- ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
- 512, priv_size, rte_socket_id());
- if (ctx->crypto_op_pool == NULL)
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
return ctx;
err:
- cperf_verify_test_free(ctx, mbuf_idx);
+ cperf_verify_test_free(ctx);
return NULL;
}
@@ -362,10 +191,13 @@ cperf_verify_op(struct rte_crypto_op *op,
break;
case CPERF_AEAD:
cipher = 1;
- cipher_offset = vector->aad.length;
+ cipher_offset = 0;
auth = 1;
- auth_offset = vector->aad.length + options->test_buffer_size;
+ auth_offset = options->test_buffer_size;
break;
+ default:
+ res = 1;
+ goto out;
}
if (cipher == 1) {
@@ -386,9 +218,39 @@ cperf_verify_op(struct rte_crypto_op *op,
options->digest_sz);
}
+out:
+ rte_free(data);
return !!res;
}
+static void
+cperf_mbuf_set(struct rte_mbuf *mbuf,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint32_t segment_sz = options->segment_sz;
+ uint8_t *mbuf_data;
+ uint8_t *test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+ uint32_t remaining_bytes = options->max_buffer_size;
+
+ while (remaining_bytes) {
+ mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+
+ if (remaining_bytes <= segment_sz) {
+ memcpy(mbuf_data, test_data, remaining_bytes);
+ return;
+ }
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ remaining_bytes -= segment_sz;
+ test_data += segment_sz;
+ mbuf = mbuf->next;
+ }
+}
+
int
cperf_verify_test_runner(void *test_ctx)
{
@@ -400,7 +262,7 @@ cperf_verify_test_runner(void *test_ctx)
static int only_once;
- uint64_t i, m_idx = 0;
+ uint64_t i;
uint16_t ops_unused = 0;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
@@ -413,7 +275,7 @@ cperf_verify_test_runner(void *test_ctx)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
@@ -440,11 +302,9 @@ cperf_verify_test_runner(void *test_ctx)
uint16_t ops_needed = burst_size - ops_unused;
- /* Allocate crypto ops from pool */
- if (ops_needed != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
@@ -454,11 +314,18 @@ cperf_verify_test_runner(void *test_ctx)
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
ops_needed, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
+
+ /* Populate the mbuf with the test vector, for verification */
+ for (i = 0; i < ops_needed; i++)
+ cperf_mbuf_set(ops[i]->sym->m_src,
+ ctx->options,
+ ctx->test_vector);
+
#ifdef CPERF_LINEARIZATION_ENABLE
if (linearize) {
/* PMD doesn't support scatter-gather and source buffer
@@ -488,10 +355,6 @@ cperf_verify_test_runner(void *test_ctx)
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
ops_processed, ctx->options->max_burst_size);
- m_idx += ops_needed;
- if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
- m_idx = 0;
-
if (ops_deqd == 0) {
/**
* Count dequeue polls which didn't return any
@@ -506,13 +369,10 @@ cperf_verify_test_runner(void *test_ctx)
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_crypto_op_free(ops_processed[i]);
}
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@@ -534,13 +394,10 @@ cperf_verify_test_runner(void *test_ctx)
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_crypto_op_free(ops_processed[i]);
}
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@@ -594,7 +451,5 @@ cperf_verify_test_destructor(void *arg)
if (ctx == NULL)
return;
- rte_cryptodev_stop(ctx->dev_id);
-
- cperf_verify_test_free(ctx, ctx->options->pool_sz);
+ cperf_verify_test_free(ctx);
}
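cperf_mbuf_set() refills the (possibly chained) source mbuf segment by segment before each iteration, since the shared mempool objects are reused and the previous crypto run has overwritten their contents. The same walk, sketched over a hypothetical linked segment list instead of an rte_mbuf chain:

/* Sketch of the per-segment refill in cperf_mbuf_set(), with the
 * rte_mbuf chain replaced by a hypothetical linked segment list.
 */
#include <stdint.h>
#include <string.h>

struct seg {
	uint8_t *data;
	struct seg *next;
};

static void fill_chain(struct seg *s, const uint8_t *test_data,
		uint32_t total, uint32_t segment_sz)
{
	uint32_t remaining = total;

	while (remaining && s != NULL) {
		uint32_t n = remaining <= segment_sz ? remaining : segment_sz;

		memcpy(s->data, test_data, n);
		remaining -= n;
		test_data += n;
		s = s->next;
	}
}

int main(void)
{
	uint8_t a[4], b[4];
	struct seg s1 = { b, NULL }, s0 = { a, &s1 };
	const uint8_t test_data[6] = { 1, 2, 3, 4, 5, 6 };

	fill_chain(&s0, test_data, 6, 4);	/* 4 bytes, then 2 */
	return 0;
}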
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 99f5d3e0..29373f5b 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -35,6 +35,9 @@
#include <rte_eal.h>
#include <rte_cryptodev.h>
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#include <rte_cryptodev_scheduler.h>
+#endif
#include "cperf.h"
#include "cperf_options.h"
@@ -42,6 +45,7 @@
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
+#include "cperf_test_pmd_cyclecount.h"
#define NUM_SESSIONS 2048
#define SESS_MEMPOOL_CACHE_SIZE 64
@@ -49,7 +53,8 @@
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
[CPERF_TEST_TYPE_LATENCY] = "latency",
- [CPERF_TEST_TYPE_VERIFY] = "verify"
+ [CPERF_TEST_TYPE_VERIFY] = "verify",
+ [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};
const char *cperf_op_type_strs[] = {
@@ -75,6 +80,11 @@ const struct cperf_test cperf_testmap[] = {
cperf_verify_test_constructor,
cperf_verify_test_runner,
cperf_verify_test_destructor
+ },
+ [CPERF_TEST_TYPE_PMDCC] = {
+ cperf_pmd_cyclecount_test_constructor,
+ cperf_pmd_cyclecount_test_runner,
+ cperf_pmd_cyclecount_test_destructor
}
};
@@ -83,7 +93,7 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
struct rte_mempool *session_pool_socket[])
{
uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
- unsigned int i;
+ unsigned int i, j;
int ret;
enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
@@ -112,21 +122,53 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
max_sess_size = sess_size;
}
+ /*
+ * Calculate the number of queue pairs needed, based on the
+ * number of available logical cores and crypto devices.
+ * For instance, if there are 4 cores and 2 crypto devices,
+ * 2 queue pairs will be set up per device.
+ */
+ opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
+ (nb_lcores / enabled_cdev_count) + 1 :
+ nb_lcores / enabled_cdev_count;
+
for (i = 0; i < enabled_cdev_count &&
i < RTE_CRYPTO_MAX_DEVS; i++) {
cdev_id = enabled_cdevs[i];
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ /*
+ * If the multi-core scheduler is used, limit the number
+ * of queue pairs to 1, as there is no way to know
+ * how many cores are being used by the PMD, and
+ * how many will be available for the application.
+ */
+ if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
+ rte_cryptodev_scheduler_mode_get(cdev_id) ==
+ CDEV_SCHED_MODE_MULTICORE)
+ opts->nb_qps = 1;
+#endif
+
+ struct rte_cryptodev_info cdev_info;
uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+ rte_cryptodev_info_get(cdev_id, &cdev_info);
+ if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
+ printf("Number of needed queue pairs is higher "
+ "than the maximum number of queue pairs "
+ "per device.\n");
+ printf("Lower the number of cores or increase "
+ "the number of crypto devices\n");
+ return -EINVAL;
+ }
struct rte_cryptodev_config conf = {
- .nb_queue_pairs = 1,
- .socket_id = socket_id
+ .nb_queue_pairs = opts->nb_qps,
+ .socket_id = socket_id
};
struct rte_cryptodev_qp_conf qp_conf = {
- .nb_descriptors = 2048
+ .nb_descriptors = opts->nb_descriptors
};
-
if (session_pool_socket[socket_id] == NULL) {
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
@@ -158,14 +200,16 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
return -EINVAL;
}
- ret = rte_cryptodev_queue_pair_setup(cdev_id, 0,
+ for (j = 0; j < opts->nb_qps; j++) {
+ ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
&qp_conf, socket_id,
session_pool_socket[socket_id]);
if (ret < 0) {
printf("Failed to setup queue pair %u on "
- "cryptodev %u", 0, cdev_id);
+ "cryptodev %u", j, cdev_id);
return -EINVAL;
}
+ }
ret = rte_cryptodev_start(cdev_id);
if (ret < 0) {
@@ -380,6 +424,7 @@ main(int argc, char **argv)
struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
int nb_cryptodevs = 0;
+ uint16_t total_nb_qps = 0;
uint8_t cdev_id, i;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
@@ -410,11 +455,12 @@ main(int argc, char **argv)
goto err;
}
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
+ session_pool_socket);
+
if (!opts.silent)
cperf_options_dump(&opts);
- nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
- session_pool_socket);
if (nb_cryptodevs < 1) {
RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
"device type\n");
@@ -464,23 +510,29 @@ main(int argc, char **argv)
if (!opts.silent)
show_test_vector(t_vec);
+ total_nb_qps = nb_cryptodevs * opts.nb_qps;
+
i = 0;
+ uint8_t qp_id = 0, cdev_index = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
+ cdev_id = enabled_cdevs[cdev_index];
uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
- ctx[cdev_id] = cperf_testmap[opts.test].constructor(
- session_pool_socket[socket_id], cdev_id, 0,
+ ctx[i] = cperf_testmap[opts.test].constructor(
+ session_pool_socket[socket_id], cdev_id, qp_id,
&opts, t_vec, &op_fns);
- if (ctx[cdev_id] == NULL) {
+ if (ctx[i] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
goto err;
}
+ qp_id = (qp_id + 1) % opts.nb_qps;
+ if (qp_id == 0)
+ cdev_index++;
i++;
}
@@ -494,19 +546,17 @@ main(int argc, char **argv)
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
-
rte_eal_remote_launch(cperf_testmap[opts.test].runner,
- ctx[cdev_id], lcore_id);
+ ctx[i], lcore_id);
i++;
}
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
rte_eal_wait_lcore(lcore_id);
i++;
@@ -525,15 +575,17 @@ main(int argc, char **argv)
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
-
- cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ cperf_testmap[opts.test].destructor(ctx[i]);
i++;
}
+ for (i = 0; i < nb_cryptodevs &&
+ i < RTE_CRYPTO_MAX_DEVS; i++)
+ rte_cryptodev_stop(enabled_cdevs[i]);
+
free_test_vector(t_vec, &opts);
printf("\n");
@@ -542,16 +594,20 @@ main(int argc, char **argv)
err:
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
cdev_id = enabled_cdevs[i];
- if (ctx[cdev_id] && cperf_testmap[opts.test].destructor)
- cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ if (ctx[i] && cperf_testmap[opts.test].destructor)
+ cperf_testmap[opts.test].destructor(ctx[i]);
i++;
}
+ for (i = 0; i < nb_cryptodevs &&
+ i < RTE_CRYPTO_MAX_DEVS; i++)
+ rte_cryptodev_stop(enabled_cdevs[i]);
+
free_test_vector(t_vec, &opts);
printf("\n");
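main() now launches one worker per queue pair rather than per device, stepping qp_id round-robin and advancing the device index each time it wraps. The same mapping written as a direct index calculation, a sketch equivalent to the incremental walk above:

/* Sketch: worker i maps to device i / nb_qps, queue pair i % nb_qps,
 * matching the incremental qp_id/cdev_index walk in main().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t nb_qps = 2, nb_cryptodevs = 2;
	uint16_t total_nb_qps = nb_cryptodevs * nb_qps;
	uint16_t i;

	for (i = 0; i < total_nb_qps; i++)
		printf("worker %u -> cdev %u, qp %u\n",
				i, i / nb_qps, i % nb_qps);
	return 0;
}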