summaryrefslogtreecommitdiffstats
path: root/app/test-crypto-perf
diff options
context:
space:
mode:
Diffstat (limited to 'app/test-crypto-perf')
-rw-r--r--app/test-crypto-perf/cperf.h5
-rw-r--r--app/test-crypto-perf/cperf_ops.c274
-rw-r--r--app/test-crypto-perf/cperf_ops.h7
-rw-r--r--app/test-crypto-perf/cperf_options.h24
-rw-r--r--app/test-crypto-perf/cperf_options_parsing.c238
-rw-r--r--app/test-crypto-perf/cperf_test_latency.c102
-rw-r--r--app/test-crypto-perf/cperf_test_latency.h5
-rw-r--r--app/test-crypto-perf/cperf_test_throughput.c60
-rw-r--r--app/test-crypto-perf/cperf_test_throughput.h5
-rw-r--r--app/test-crypto-perf/cperf_test_vector_parsing.c98
-rw-r--r--app/test-crypto-perf/cperf_test_vectors.c172
-rw-r--r--app/test-crypto-perf/cperf_test_vectors.h20
-rw-r--r--app/test-crypto-perf/cperf_test_verify.c47
-rw-r--r--app/test-crypto-perf/cperf_test_verify.h5
-rw-r--r--app/test-crypto-perf/data/aes_cbc_128_sha.data2
-rw-r--r--app/test-crypto-perf/data/aes_cbc_192_sha.data2
-rw-r--r--app/test-crypto-perf/data/aes_cbc_256_sha.data2
-rw-r--r--app/test-crypto-perf/main.c183
18 files changed, 872 insertions, 379 deletions
diff --git a/app/test-crypto-perf/cperf.h b/app/test-crypto-perf/cperf.h
index 293ba940..c9f7f817 100644
--- a/app/test-crypto-perf/cperf.h
+++ b/app/test-crypto-perf/cperf.h
@@ -41,7 +41,10 @@ struct cperf_options;
struct cperf_test_vector;
struct cperf_op_fns;
-typedef void *(*cperf_constructor_t)(uint8_t dev_id, uint16_t qp_id,
+typedef void *(*cperf_constructor_t)(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *t_vec,
const struct cperf_op_fns *op_fns);
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index c2c3db57..88fb9725 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -40,7 +40,8 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector __rte_unused)
+ const struct cperf_test_vector *test_vector __rte_unused,
+ uint16_t iv_offset __rte_unused)
{
uint16_t i;
@@ -65,7 +66,8 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector __rte_unused)
+ const struct cperf_test_vector *test_vector __rte_unused,
+ uint16_t iv_offset __rte_unused)
{
uint16_t i;
@@ -90,7 +92,8 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -103,10 +106,6 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
@@ -117,6 +116,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
sym_op->cipher.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+
+ }
+ }
+
return 0;
}
@@ -125,7 +135,8 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -137,12 +148,19 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
+ if (test_vector->auth_iv.length) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *,
+ iv_offset);
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+
/* authentication parameters */
if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@@ -151,24 +169,19 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
- sym_op->auth.digest.length = options->auth_digest_sz;
- sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
- sym_op->auth.aad.data = test_vector->aad.data;
- sym_op->auth.aad.length = options->auth_aad_sz;
}
@@ -182,6 +195,17 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->auth.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ if (test_vector->auth_iv.length) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+ }
+ }
return 0;
}
@@ -190,7 +214,8 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -203,10 +228,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
@@ -221,7 +242,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@@ -230,24 +250,19 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
- sym_op->auth.digest.length = options->auth_digest_sz;
- sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
- sym_op->auth.aad.data = test_vector->aad.data;
- sym_op->auth.aad.length = options->auth_aad_sz;
}
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -260,6 +275,26 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+ if (test_vector->auth_iv.length) {
+ /*
+ * Copy IV after the crypto operation and
+ * the cipher IV
+ */
+ iv_ptr += test_vector->cipher_iv.length;
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+ }
+
+ }
+
return 0;
}
@@ -268,7 +303,8 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -280,68 +316,69 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
- /* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
- sym_op->cipher.data.length = options->test_buffer_size;
- sym_op->cipher.data.offset =
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
+ /* AEAD parameters */
+ sym_op->aead.data.length = options->test_buffer_size;
+ sym_op->aead.data.offset =
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16);
- sym_op->auth.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
- sym_op->auth.aad.length = options->auth_aad_sz;
+ sym_op->aead.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
- /* authentication parameters */
- if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- sym_op->auth.digest.data = test_vector->digest.data;
- sym_op->auth.digest.phys_addr =
+ if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
+ sym_op->aead.digest.data = test_vector->digest.data;
+ sym_op->aead.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
- uint32_t offset = sym_op->cipher.data.length +
- sym_op->cipher.data.offset;
+ uint32_t offset = sym_op->aead.data.length +
+ sym_op->aead.data.offset;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
- sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
- sym_op->auth.digest.phys_addr =
+ sym_op->aead.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
-
- sym_op->auth.digest.length = options->auth_digest_sz;
}
+ }
- sym_op->auth.data.length = options->test_buffer_size;
- sym_op->auth.data.offset = options->auth_aad_sz;
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->aead_iv.data,
+ test_vector->aead_iv.length);
+ }
}
return 0;
}
static struct rte_cryptodev_sym_session *
-cperf_create_session(uint8_t dev_id,
+cperf_create_session(struct rte_mempool *sess_mp,
+ uint8_t dev_id,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
struct rte_crypto_sym_xform cipher_xform;
struct rte_crypto_sym_xform auth_xform;
+ struct rte_crypto_sym_xform aead_xform;
struct rte_cryptodev_sym_session *sess = NULL;
+ sess = rte_cryptodev_sym_session_create(sess_mp);
/*
* cipher only
*/
@@ -350,6 +387,7 @@ cperf_create_session(uint8_t dev_id,
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
+ cipher_xform.cipher.iv.offset = iv_offset;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
@@ -357,12 +395,16 @@ cperf_create_session(uint8_t dev_id,
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
+ cipher_xform.cipher.iv.length =
+ test_vector->cipher_iv.length;
} else {
cipher_xform.cipher.key.data = NULL;
cipher_xform.cipher.key.length = 0;
+ cipher_xform.cipher.iv.length = 0;
}
/* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
+ sess_mp);
/*
* auth only
*/
@@ -375,27 +417,26 @@ cperf_create_session(uint8_t dev_id,
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
auth_xform.auth.digest_length =
- options->auth_digest_sz;
- auth_xform.auth.add_auth_data_length =
- options->auth_aad_sz;
+ options->digest_sz;
auth_xform.auth.key.length =
test_vector->auth_key.length;
auth_xform.auth.key.data = test_vector->auth_key.data;
+ auth_xform.auth.iv.length =
+ test_vector->auth_iv.length;
} else {
auth_xform.auth.digest_length = 0;
- auth_xform.auth.add_auth_data_length = 0;
auth_xform.auth.key.length = 0;
auth_xform.auth.key.data = NULL;
+ auth_xform.auth.iv.length = 0;
}
/* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
+ sess_mp);
/*
* cipher and auth
*/
} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
- || options->op_type == CPERF_AUTH_THEN_CIPHER
- || options->op_type == CPERF_AEAD) {
-
+ || options->op_type == CPERF_AUTH_THEN_CIPHER) {
/*
* cipher
*/
@@ -403,6 +444,7 @@ cperf_create_session(uint8_t dev_id,
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
+ cipher_xform.cipher.iv.offset = iv_offset;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
@@ -410,9 +452,12 @@ cperf_create_session(uint8_t dev_id,
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
+ cipher_xform.cipher.iv.length =
+ test_vector->cipher_iv.length;
} else {
cipher_xform.cipher.key.data = NULL;
cipher_xform.cipher.key.length = 0;
+ cipher_xform.cipher.iv.length = 0;
}
/*
@@ -425,56 +470,53 @@ cperf_create_session(uint8_t dev_id,
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
- auth_xform.auth.digest_length = options->auth_digest_sz;
- auth_xform.auth.add_auth_data_length =
- options->auth_aad_sz;
- /* auth options for aes gcm */
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
- auth_xform.auth.key.length = 0;
- auth_xform.auth.key.data = NULL;
- } else { /* auth options for others */
- auth_xform.auth.key.length =
+ auth_xform.auth.digest_length = options->digest_sz;
+ auth_xform.auth.iv.length = test_vector->auth_iv.length;
+ auth_xform.auth.key.length =
test_vector->auth_key.length;
- auth_xform.auth.key.data =
- test_vector->auth_key.data;
- }
+ auth_xform.auth.key.data =
+ test_vector->auth_key.data;
} else {
auth_xform.auth.digest_length = 0;
- auth_xform.auth.add_auth_data_length = 0;
auth_xform.auth.key.length = 0;
auth_xform.auth.key.data = NULL;
+ auth_xform.auth.iv.length = 0;
}
- /* create crypto session for aes gcm */
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
- if (options->cipher_op ==
- RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- cipher_xform.next = &auth_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &cipher_xform);
- } else { /* decrypt */
- auth_xform.next = &cipher_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &auth_xform);
- }
- } else { /* create crypto session for other */
- /* cipher then auth */
- if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
- cipher_xform.next = &auth_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &cipher_xform);
- } else { /* auth then cipher */
- auth_xform.next = &cipher_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &auth_xform);
- }
+ /* cipher then auth */
+ if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ cipher_xform.next = &auth_xform;
+ /* create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &cipher_xform, sess_mp);
+ } else { /* auth then cipher */
+ auth_xform.next = &cipher_xform;
+ /* create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &auth_xform, sess_mp);
}
+ } else { /* options->op_type == CPERF_AEAD */
+ aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ aead_xform.next = NULL;
+ aead_xform.aead.algo = options->aead_algo;
+ aead_xform.aead.op = options->aead_op;
+ aead_xform.aead.iv.offset = iv_offset;
+
+ aead_xform.aead.key.data =
+ test_vector->aead_key.data;
+ aead_xform.aead.key.length =
+ test_vector->aead_key.length;
+ aead_xform.aead.iv.length = test_vector->aead_iv.length;
+
+ aead_xform.aead.digest_length = options->digest_sz;
+ aead_xform.aead.aad_length =
+ options->aead_aad_sz;
+
+ /* Create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &aead_xform, sess_mp);
}
+
return sess;
}
@@ -486,14 +528,14 @@ cperf_get_op_functions(const struct cperf_options *options,
op_fns->sess_create = cperf_create_session;
- if (options->op_type == CPERF_AEAD
- || options->op_type == CPERF_AUTH_THEN_CIPHER
+ if (options->op_type == CPERF_AEAD) {
+ op_fns->populate_ops = cperf_set_ops_aead;
+ return 0;
+ }
+
+ if (options->op_type == CPERF_AUTH_THEN_CIPHER
|| options->op_type == CPERF_CIPHER_THEN_AUTH) {
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
- op_fns->populate_ops = cperf_set_ops_aead;
- else
- op_fns->populate_ops = cperf_set_ops_cipher_auth;
+ op_fns->populate_ops = cperf_set_ops_cipher_auth;
return 0;
}
if (options->op_type == CPERF_AUTH_ONLY) {
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 1b748daf..1f8fa937 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -41,14 +41,17 @@
typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
+ struct rte_mempool *sess_mp,
uint8_t dev_id, const struct cperf_options *options,
- const struct cperf_test_vector *test_vector);
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset);
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector);
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset);
struct cperf_op_fns {
cperf_sessions_create_t sess_create;
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index b928c584..10cd2d8a 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -28,8 +28,16 @@
#define CPERF_AUTH_ALGO ("auth-algo")
#define CPERF_AUTH_OP ("auth-op")
#define CPERF_AUTH_KEY_SZ ("auth-key-sz")
-#define CPERF_AUTH_DIGEST_SZ ("auth-digest-sz")
-#define CPERF_AUTH_AAD_SZ ("auth-aad-sz")
+#define CPERF_AUTH_IV_SZ ("auth-iv-sz")
+
+#define CPERF_AEAD_ALGO ("aead-algo")
+#define CPERF_AEAD_OP ("aead-op")
+#define CPERF_AEAD_KEY_SZ ("aead-key-sz")
+#define CPERF_AEAD_IV_SZ ("aead-iv-sz")
+#define CPERF_AEAD_AAD_SZ ("aead-aad-sz")
+
+#define CPERF_DIGEST_SZ ("digest-sz")
+
#define CPERF_CSV ("csv-friendly")
#define MAX_LIST 32
@@ -76,8 +84,16 @@ struct cperf_options {
enum rte_crypto_auth_operation auth_op;
uint16_t auth_key_sz;
- uint16_t auth_digest_sz;
- uint16_t auth_aad_sz;
+ uint16_t auth_iv_sz;
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ enum rte_crypto_aead_operation aead_op;
+
+ uint16_t aead_key_sz;
+ uint16_t aead_iv_sz;
+ uint16_t aead_aad_sz;
+
+ uint16_t digest_sz;
char device_type[RTE_CRYPTODEV_NAME_LEN];
enum cperf_op_type op_type;
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index d172671f..085aa8fe 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -312,7 +312,7 @@ parse_buffer_sz(struct cperf_options *opts, const char *arg)
&opts->min_buffer_size,
&opts->max_buffer_size);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+ RTE_LOG(ERR, USER1, "failed to parse buffer size/s\n");
return -1;
}
opts->buffer_size_count = ret;
@@ -543,15 +543,76 @@ parse_auth_key_sz(struct cperf_options *opts, const char *arg)
}
static int
-parse_auth_digest_sz(struct cperf_options *opts, const char *arg)
+parse_digest_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->digest_sz, arg);
+}
+
+static int
+parse_auth_iv_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_iv_sz, arg);
+}
+
+static int
+parse_aead_algo(struct cperf_options *opts, const char *arg)
+{
+ enum rte_crypto_aead_algorithm aead_algo;
+
+ if (rte_cryptodev_get_aead_algo_enum(&aead_algo, arg) < 0) {
+ RTE_LOG(ERR, USER1, "Invalid AEAD algorithm specified\n");
+ return -1;
+ }
+
+ opts->aead_algo = aead_algo;
+
+ return 0;
+}
+
+static int
+parse_aead_op(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map aead_op_namemap[] = {
+ {
+ rte_crypto_aead_operation_strings
+ [RTE_CRYPTO_AEAD_OP_ENCRYPT],
+ RTE_CRYPTO_AEAD_OP_ENCRYPT },
+ {
+ rte_crypto_aead_operation_strings
+ [RTE_CRYPTO_AEAD_OP_DECRYPT],
+ RTE_CRYPTO_AEAD_OP_DECRYPT
+ }
+ };
+
+ int id = get_str_key_id_mapping(aead_op_namemap,
+ RTE_DIM(aead_op_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid AEAD operation specified"
+ "\n");
+ return -1;
+ }
+
+ opts->aead_op = (enum rte_crypto_aead_operation)id;
+
+ return 0;
+}
+
+static int
+parse_aead_key_sz(struct cperf_options *opts, const char *arg)
{
- return parse_uint16_t(&opts->auth_digest_sz, arg);
+ return parse_uint16_t(&opts->aead_key_sz, arg);
}
static int
-parse_auth_aad_sz(struct cperf_options *opts, const char *arg)
+parse_aead_iv_sz(struct cperf_options *opts, const char *arg)
{
- return parse_uint16_t(&opts->auth_aad_sz, arg);
+ return parse_uint16_t(&opts->aead_iv_sz, arg);
+}
+
+static int
+parse_aead_aad_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->aead_aad_sz, arg);
}
static int
@@ -600,8 +661,17 @@ static struct option lgopts[] = {
{ CPERF_AUTH_OP, required_argument, 0, 0 },
{ CPERF_AUTH_KEY_SZ, required_argument, 0, 0 },
- { CPERF_AUTH_DIGEST_SZ, required_argument, 0, 0 },
- { CPERF_AUTH_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_AUTH_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_AEAD_ALGO, required_argument, 0, 0 },
+ { CPERF_AEAD_OP, required_argument, 0, 0 },
+
+ { CPERF_AEAD_KEY_SZ, required_argument, 0, 0 },
+ { CPERF_AEAD_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_AEAD_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_DIGEST_SZ, required_argument, 0, 0 },
+
{ CPERF_CSV, no_argument, 0, 0},
{ NULL, 0, 0, 0 }
@@ -650,8 +720,13 @@ cperf_options_default(struct cperf_options *opts)
opts->auth_op = RTE_CRYPTO_AUTH_OP_GENERATE;
opts->auth_key_sz = 64;
- opts->auth_digest_sz = 12;
- opts->auth_aad_sz = 0;
+ opts->auth_iv_sz = 0;
+
+ opts->aead_key_sz = 0;
+ opts->aead_iv_sz = 0;
+ opts->aead_aad_sz = 0;
+
+ opts->digest_sz = 12;
}
static int
@@ -678,9 +753,14 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_AUTH_ALGO, parse_auth_algo },
{ CPERF_AUTH_OP, parse_auth_op },
{ CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
- { CPERF_AUTH_DIGEST_SZ, parse_auth_digest_sz },
- { CPERF_AUTH_AAD_SZ, parse_auth_aad_sz },
- { CPERF_CSV, parse_csv_friendly},
+ { CPERF_AUTH_IV_SZ, parse_auth_iv_sz },
+ { CPERF_AEAD_ALGO, parse_aead_algo },
+ { CPERF_AEAD_OP, parse_aead_op },
+ { CPERF_AEAD_KEY_SZ, parse_aead_key_sz },
+ { CPERF_AEAD_IV_SZ, parse_aead_iv_sz },
+ { CPERF_AEAD_AAD_SZ, parse_aead_aad_sz },
+ { CPERF_DIGEST_SZ, parse_digest_sz },
+ { CPERF_CSV, parse_csv_friendly},
};
unsigned int i;
@@ -717,11 +797,56 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
return 0;
}
-int
-cperf_options_check(struct cperf_options *options)
+static int
+check_cipher_buffer_length(struct cperf_options *options)
{
uint32_t buffer_size, buffer_size_idx = 0;
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
+ if (options->inc_buffer_size != 0)
+ buffer_size = options->min_buffer_size;
+ else
+ buffer_size = options->buffer_size_list[0];
+
+ while (buffer_size <= options->max_buffer_size) {
+ if ((buffer_size % AES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+
+ if (options->inc_buffer_size != 0)
+ buffer_size += options->inc_buffer_size;
+ else {
+ if (++buffer_size_idx == options->buffer_size_count)
+ break;
+ buffer_size = options->buffer_size_list[buffer_size_idx];
+ }
+
+ }
+ }
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
+ for (buffer_size = options->min_buffer_size;
+ buffer_size < options->max_buffer_size;
+ buffer_size += options->inc_buffer_size) {
+ if ((buffer_size % DES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+cperf_options_check(struct cperf_options *options)
+{
if (options->segments_nb > options->min_buffer_size) {
RTE_LOG(ERR, USER1,
"Segments number greater than buffer size.\n");
@@ -795,68 +920,13 @@ cperf_options_check(struct cperf_options *options)
" options: decrypt and verify.\n");
return -EINVAL;
}
- } else if (options->op_type == CPERF_AEAD) {
- if (!(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- options->auth_op ==
- RTE_CRYPTO_AUTH_OP_GENERATE) &&
- !(options->cipher_op ==
- RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- options->auth_op ==
- RTE_CRYPTO_AUTH_OP_VERIFY)) {
- RTE_LOG(ERR, USER1, "Use together options: encrypt and"
- " generate or decrypt and verify.\n");
- return -EINVAL;
- }
}
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_CCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (options->op_type != CPERF_AEAD) {
- RTE_LOG(ERR, USER1, "Use --optype aead\n");
+ if (options->op_type == CPERF_CIPHER_ONLY ||
+ options->op_type == CPERF_CIPHER_THEN_AUTH ||
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (check_cipher_buffer_length(options) < 0)
return -EINVAL;
- }
- }
-
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
- if (options->inc_buffer_size != 0)
- buffer_size = options->min_buffer_size;
- else
- buffer_size = options->buffer_size_list[0];
-
- while (buffer_size <= options->max_buffer_size) {
- if ((buffer_size % AES_BLOCK_SIZE) != 0) {
- RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
- "not suitable for the algorithm selected\n");
- return -EINVAL;
- }
-
- if (options->inc_buffer_size != 0)
- buffer_size += options->inc_buffer_size;
- else {
- if (++buffer_size_idx == options->buffer_size_count)
- break;
- buffer_size = options->buffer_size_list[buffer_size_idx];
- }
-
- }
- }
-
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
- for (buffer_size = options->min_buffer_size;
- buffer_size < options->max_buffer_size;
- buffer_size += options->inc_buffer_size) {
- if ((buffer_size % DES_BLOCK_SIZE) != 0) {
- RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
- "not suitable for the algorithm selected\n");
- return -EINVAL;
- }
- }
}
return 0;
@@ -907,22 +977,20 @@ cperf_options_dump(struct cperf_options *opts)
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
printf("# auth algorithm: %s\n",
rte_crypto_auth_algorithm_strings[opts->auth_algo]);
printf("# auth operation: %s\n",
rte_crypto_auth_operation_strings[opts->auth_op]);
printf("# auth key size: %u\n", opts->auth_key_sz);
- printf("# auth digest size: %u\n", opts->auth_digest_sz);
- printf("# auth aad size: %u\n", opts->auth_aad_sz);
+ printf("# auth iv size: %u\n", opts->auth_iv_sz);
+ printf("# auth digest size: %u\n", opts->digest_sz);
printf("#\n");
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
printf("# cipher algorithm: %s\n",
rte_crypto_cipher_algorithm_strings[opts->cipher_algo]);
printf("# cipher operation: %s\n",
@@ -931,4 +999,16 @@ cperf_options_dump(struct cperf_options *opts)
printf("# cipher iv size: %u\n", opts->cipher_iv_sz);
printf("#\n");
}
+
+ if (opts->op_type == CPERF_AEAD) {
+ printf("# aead algorithm: %s\n",
+ rte_crypto_aead_algorithm_strings[opts->aead_algo]);
+ printf("# aead operation: %s\n",
+ rte_crypto_aead_operation_strings[opts->aead_op]);
+ printf("# aead key size: %u\n", opts->aead_key_sz);
+ printf("# aead iv size: %u\n", opts->aead_iv_sz);
+ printf("# aead digest size: %u\n", opts->digest_sz);
+ printf("# aead aad size: %u\n", opts->aead_aad_sz);
+ printf("#\n");
+ }
}
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index e61ac972..58b21abd 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -66,6 +66,10 @@ struct cperf_latency_ctx {
struct cperf_op_result *res;
};
+struct priv_op_data {
+ struct cperf_op_result *result;
+};
+
#define max(a, b) (a > b ? (uint64_t)a : (uint64_t)b)
#define min(a, b) (a < b ? (uint64_t)a : (uint64_t)b)
@@ -75,8 +79,10 @@ cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -163,14 +169,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -187,7 +193,8 @@ error:
}
void *
-cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_latency_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -207,7 +214,13 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -220,7 +233,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -250,7 +263,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -276,9 +289,14 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = sizeof(struct priv_op_data) +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ test_vector->aead_iv.length;
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
+
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -295,11 +313,20 @@ err:
return NULL;
}
+static inline void
+store_timestamp(struct rte_crypto_op *op, uint64_t timestamp)
+{
+ struct priv_op_data *priv_data;
+
+ priv_data = (struct priv_op_data *) (op->sym + 1);
+ priv_data->result->status = op->status;
+ priv_data->result->tsc_end = timestamp;
+}
+
int
cperf_latency_test_runner(void *arg)
{
struct cperf_latency_ctx *ctx = arg;
- struct cperf_op_result *pres;
uint16_t test_burst_size;
uint8_t burst_size_idx = 0;
@@ -311,6 +338,7 @@ cperf_latency_test_runner(void *arg)
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
uint64_t i;
+ struct priv_op_data *priv_data;
uint32_t lcore = rte_lcore_id();
@@ -339,6 +367,10 @@ cperf_latency_test_runner(void *arg)
else
test_burst_size = ctx->options->burst_size_list[0];
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
uint64_t m_idx = 0, b_idx = 0;
@@ -360,14 +392,20 @@ cperf_latency_test_runner(void *arg)
if (burst_size != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size))
+ ops, burst_size)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
burst_size, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
tsc_start = rte_rdtsc_precise();
@@ -393,12 +431,19 @@ cperf_latency_test_runner(void *arg)
tsc_end = rte_rdtsc_precise();
/* Free memory for not enqueued operations */
- for (i = ops_enqd; i < burst_size; i++)
- rte_crypto_op_free(ops[i]);
+ if (ops_enqd != burst_size)
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)&ops[ops_enqd],
+ burst_size - ops_enqd);
for (i = 0; i < ops_enqd; i++) {
ctx->res[tsc_idx].tsc_start = tsc_start;
- ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+ /*
+ * Private data structure starts after the end of the
+ * rte_crypto_sym_op structure.
+ */
+ priv_data = (struct priv_op_data *) (ops[i]->sym + 1);
+ priv_data->result = (void *)&ctx->res[tsc_idx];
tsc_idx++;
}
@@ -409,14 +454,11 @@ cperf_latency_test_runner(void *arg)
* the crypto operation will change the data and cause
* failures.
*/
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
+ for (i = 0; i < ops_deqd; i++)
+ store_timestamp(ops_processed[i], tsc_end);
- rte_crypto_op_free(ops_processed[i]);
- }
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
@@ -445,14 +487,11 @@ cperf_latency_test_runner(void *arg)
tsc_end = rte_rdtsc_precise();
if (ops_deqd != 0) {
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
+ for (i = 0; i < ops_deqd; i++)
+ store_timestamp(ops_processed[i], tsc_end);
- rte_crypto_op_free(ops_processed[i]);
- }
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
@@ -547,6 +586,7 @@ cperf_latency_test_destructor(void *arg)
if (ctx == NULL)
return;
- cperf_latency_test_free(ctx, ctx->options->pool_sz);
+ rte_cryptodev_stop(ctx->dev_id);
+ cperf_latency_test_free(ctx, ctx->options->pool_sz);
}
diff --git a/app/test-crypto-perf/cperf_test_latency.h b/app/test-crypto-perf/cperf_test_latency.h
index 6a2cf610..1bbedb4e 100644
--- a/app/test-crypto-perf/cperf_test_latency.h
+++ b/app/test-crypto-perf/cperf_test_latency.h
@@ -43,7 +43,10 @@
#include "cperf_test_vectors.h"
void *
-cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_latency_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 61b27ea5..3bb1cb05 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -64,8 +64,10 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -151,14 +153,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -175,7 +177,8 @@ error:
}
void *
-cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -195,7 +198,12 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -208,7 +216,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -236,7 +244,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -262,9 +270,12 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = test_vector->cipher_iv.length +
+ test_vector->auth_iv.length + test_vector->aead_iv.length;
+
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -315,6 +326,9 @@ cperf_throughput_test_runner(void *test_ctx)
else
test_burst_size = ctx->options->burst_size_list[0];
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
@@ -339,14 +353,20 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
+ ops, ops_needed)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
/**
* When ops_needed is smaller than ops_enqd, the
@@ -396,8 +416,8 @@ cperf_throughput_test_runner(void *test_ctx)
* the crypto operation will change the data and cause
* failures.
*/
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
} else {
@@ -426,8 +446,8 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_deqd == 0)
ops_deqd_failed++;
else {
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@@ -471,14 +491,14 @@ cperf_throughput_test_runner(void *test_ctx)
cycles_per_packet);
} else {
if (!only_once)
- printf("# lcore id, Buffer Size(B),"
+ printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
"Cycles/Buf\n\n");
only_once = 1;
- printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
- "%.f3;%.f3;%.f3\n",
+ printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%.3f;%.3f;%.3f\n",
ctx->lcore_id,
ctx->options->test_buffer_size,
test_burst_size,
@@ -514,5 +534,7 @@ cperf_throughput_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_throughput_test_free(ctx, ctx->options->pool_sz);
}
diff --git a/app/test-crypto-perf/cperf_test_throughput.h b/app/test-crypto-perf/cperf_test_throughput.h
index f1b5766c..987d0c31 100644
--- a/app/test-crypto-perf/cperf_test_throughput.h
+++ b/app/test-crypto-perf/cperf_test_throughput.h
@@ -44,7 +44,10 @@
void *
-cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_throughput_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index f384e3d9..148a6041 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -1,3 +1,34 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
#ifdef RTE_EXEC_ENV_BSDAPP
#define _WITH_GETLINE
#endif
@@ -15,7 +46,8 @@ free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts)
if (vector == NULL || opts == NULL)
return -1;
- rte_free(vector->iv.data);
+ rte_free(vector->cipher_iv.data);
+ rte_free(vector->auth_iv.data);
rte_free(vector->aad.data);
rte_free(vector->digest.data);
@@ -84,15 +116,28 @@ show_test_vector(struct cperf_test_vector *test_vector)
printf("\n");
}
- if (test_vector->iv.data) {
- printf("\niv =\n");
- for (i = 0; i < test_vector->iv.length; ++i) {
+ if (test_vector->cipher_iv.data) {
+ printf("\ncipher_iv =\n");
+ for (i = 0; i < test_vector->cipher_iv.length; ++i) {
if ((i % wrap == 0) && (i != 0))
printf("\n");
- if (i == (uint32_t)(test_vector->iv.length - 1))
- printf("0x%02x", test_vector->iv.data[i]);
+ if (i == (uint32_t)(test_vector->cipher_iv.length - 1))
+ printf("0x%02x", test_vector->cipher_iv.data[i]);
else
- printf("0x%02x, ", test_vector->iv.data[i]);
+ printf("0x%02x, ", test_vector->cipher_iv.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->auth_iv.data) {
+ printf("\nauth_iv =\n");
+ for (i = 0; i < test_vector->auth_iv.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->auth_iv.length - 1))
+ printf("0x%02x", test_vector->auth_iv.data[i]);
+ else
+ printf("0x%02x, ", test_vector->auth_iv.data[i]);
}
printf("\n");
}
@@ -300,19 +345,32 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
vector->auth_key.length = opts->auth_key_sz;
}
- } else if (strstr(key_token, "iv")) {
- rte_free(vector->iv.data);
- vector->iv.data = data;
- vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
+ } else if (strstr(key_token, "cipher_iv")) {
+ rte_free(vector->cipher_iv.data);
+ vector->cipher_iv.data = data;
if (tc_found)
- vector->iv.length = data_length;
+ vector->cipher_iv.length = data_length;
else {
if (opts->cipher_iv_sz > data_length) {
- printf("Global iv shorter than "
+ printf("Global cipher iv shorter than "
"cipher_iv_sz\n");
return -1;
}
- vector->iv.length = opts->cipher_iv_sz;
+ vector->cipher_iv.length = opts->cipher_iv_sz;
+ }
+
+ } else if (strstr(key_token, "auth_iv")) {
+ rte_free(vector->auth_iv.data);
+ vector->auth_iv.data = data;
+ if (tc_found)
+ vector->auth_iv.length = data_length;
+ else {
+ if (opts->auth_iv_sz > data_length) {
+ printf("Global auth iv shorter than "
+ "auth_iv_sz\n");
+ return -1;
+ }
+ vector->auth_iv.length = opts->auth_iv_sz;
}
} else if (strstr(key_token, "ciphertext")) {
@@ -336,12 +394,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
if (tc_found)
vector->aad.length = data_length;
else {
- if (opts->auth_aad_sz > data_length) {
+ if (opts->aead_aad_sz > data_length) {
printf("Global aad shorter than "
- "auth_aad_sz\n");
+ "aead_aad_sz\n");
return -1;
}
- vector->aad.length = opts->auth_aad_sz;
+ vector->aad.length = opts->aead_aad_sz;
}
} else if (strstr(key_token, "digest")) {
@@ -352,12 +410,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
if (tc_found)
vector->digest.length = data_length;
else {
- if (opts->auth_digest_sz > data_length) {
+ if (opts->digest_sz > data_length) {
printf("Global digest shorter than "
- "auth_digest_sz\n");
+ "digest_sz\n");
return -1;
}
- vector->digest.length = opts->auth_digest_sz;
+ vector->digest.length = opts->digest_sz;
}
} else {
printf("Not valid key: '%s'\n", trim_space(key_token));
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 757957f7..e51dcc3f 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -1,3 +1,35 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include <rte_crypto.h>
#include <rte_malloc.h>
@@ -385,6 +417,13 @@ uint8_t auth_key[] = {
0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F
};
+/* AEAD key */
+uint8_t aead_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+
/* Digests */
uint8_t digest[2048] = { 0x00 };
@@ -403,98 +442,127 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
if (options->op_type == CPERF_CIPHER_ONLY ||
options->op_type == CPERF_CIPHER_THEN_AUTH ||
- options->op_type == CPERF_AUTH_THEN_CIPHER ||
- options->op_type == CPERF_AEAD) {
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
t_vec->cipher_key.length = 0;
t_vec->ciphertext.data = plaintext;
t_vec->cipher_key.data = NULL;
- t_vec->iv.data = NULL;
+ t_vec->cipher_iv.data = NULL;
} else {
t_vec->cipher_key.length = options->cipher_key_sz;
t_vec->ciphertext.data = ciphertext;
t_vec->cipher_key.data = cipher_key;
- t_vec->iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
16);
- if (t_vec->iv.data == NULL) {
+ if (t_vec->cipher_iv.data == NULL) {
rte_free(t_vec);
return NULL;
}
- memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
+ memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
}
t_vec->ciphertext.length = options->max_buffer_size;
- t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
- t_vec->iv.length = options->cipher_iv_sz;
+
+ /* Set IV parameters */
+ t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ 16);
+ if (options->cipher_iv_sz && t_vec->cipher_iv.data == NULL) {
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
+ t_vec->cipher_iv.length = options->cipher_iv_sz;
+
t_vec->data.cipher_offset = 0;
t_vec->data.cipher_length = options->max_buffer_size;
+
}
if (options->op_type == CPERF_AUTH_ONLY ||
options->op_type == CPERF_CIPHER_THEN_AUTH ||
- options->op_type == CPERF_AUTH_THEN_CIPHER ||
- options->op_type == CPERF_AEAD) {
- uint8_t aad_alloc = 0;
-
- t_vec->auth_key.length = options->auth_key_sz;
-
- switch (options->auth_algo) {
- case RTE_CRYPTO_AUTH_NULL:
- t_vec->auth_key.data = NULL;
- aad_alloc = 0;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (options->auth_algo == RTE_CRYPTO_AUTH_NULL) {
+ t_vec->auth_key.length = 0;
t_vec->auth_key.data = NULL;
- aad_alloc = 1;
- break;
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- t_vec->auth_key.data = auth_key;
- aad_alloc = 1;
- break;
- case RTE_CRYPTO_AUTH_AES_GMAC:
- /* auth key should be the same as cipher key */
- t_vec->auth_key.data = cipher_key;
- aad_alloc = 1;
- break;
- default:
+ t_vec->digest.data = NULL;
+ t_vec->digest.length = 0;
+ } else {
+ t_vec->auth_key.length = options->auth_key_sz;
t_vec->auth_key.data = auth_key;
- aad_alloc = 0;
- break;
+
+ t_vec->digest.data = rte_malloc(NULL,
+ options->digest_sz,
+ 16);
+ if (t_vec->digest.data == NULL) {
+ rte_free(t_vec->cipher_iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ t_vec->digest.phys_addr =
+ rte_malloc_virt2phy(t_vec->digest.data);
+ t_vec->digest.length = options->digest_sz;
+ memcpy(t_vec->digest.data, digest,
+ options->digest_sz);
}
+ t_vec->data.auth_offset = 0;
+ t_vec->data.auth_length = options->max_buffer_size;
+
+ /* Set IV parameters */
+ t_vec->auth_iv.data = rte_malloc(NULL, options->auth_iv_sz,
+ 16);
+ if (options->auth_iv_sz && t_vec->auth_iv.data == NULL) {
+ if (options->op_type != CPERF_AUTH_ONLY)
+ rte_free(t_vec->cipher_iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->auth_iv.data, iv, options->auth_iv_sz);
+ t_vec->auth_iv.length = options->auth_iv_sz;
+ }
+
+ if (options->op_type == CPERF_AEAD) {
+ t_vec->aead_key.length = options->aead_key_sz;
+ t_vec->aead_key.data = aead_key;
- if (aad_alloc && options->auth_aad_sz) {
+ if (options->aead_aad_sz) {
t_vec->aad.data = rte_malloc(NULL,
- options->auth_aad_sz, 16);
+ options->aead_aad_sz, 16);
if (t_vec->aad.data == NULL) {
- if (options->op_type != CPERF_AUTH_ONLY)
- rte_free(t_vec->iv.data);
rte_free(t_vec);
return NULL;
}
- memcpy(t_vec->aad.data, aad, options->auth_aad_sz);
+ memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
+ t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.length = options->aead_aad_sz;
} else {
t_vec->aad.data = NULL;
+ t_vec->aad.length = 0;
}
- t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
- t_vec->aad.length = options->auth_aad_sz;
- t_vec->digest.data = rte_malloc(NULL, options->auth_digest_sz,
- 16);
+ t_vec->digest.data = rte_malloc(NULL, options->digest_sz,
+ 16);
if (t_vec->digest.data == NULL) {
- if (options->op_type != CPERF_AUTH_ONLY)
- rte_free(t_vec->iv.data);
rte_free(t_vec->aad.data);
rte_free(t_vec);
return NULL;
}
t_vec->digest.phys_addr =
rte_malloc_virt2phy(t_vec->digest.data);
- t_vec->digest.length = options->auth_digest_sz;
- memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
- t_vec->data.auth_offset = 0;
- t_vec->data.auth_length = options->max_buffer_size;
- }
+ t_vec->digest.length = options->digest_sz;
+ memcpy(t_vec->digest.data, digest, options->digest_sz);
+ t_vec->data.aead_offset = 0;
+ t_vec->data.aead_length = options->max_buffer_size;
+ /* Set IV parameters */
+ t_vec->aead_iv.data = rte_malloc(NULL, options->aead_iv_sz,
+ 16);
+ if (options->aead_iv_sz && t_vec->aead_iv.data == NULL) {
+ rte_free(t_vec->aad.data);
+ rte_free(t_vec->digest.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->aead_iv.data, iv, options->aead_iv_sz);
+ t_vec->aead_iv.length = options->aead_iv_sz;
+ }
return t_vec;
}
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
index e64f1168..85955703 100644
--- a/app/test-crypto-perf/cperf_test_vectors.h
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -53,9 +53,23 @@ struct cperf_test_vector {
struct {
uint8_t *data;
- phys_addr_t phys_addr;
uint16_t length;
- } iv;
+ } aead_key;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } cipher_iv;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } auth_iv;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } aead_iv;
struct {
uint8_t *data;
@@ -79,6 +93,8 @@ struct cperf_test_vector {
uint32_t auth_length;
uint32_t cipher_offset;
uint32_t cipher_length;
+ uint32_t aead_offset;
+ uint32_t aead_length;
} data;
};
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 454221e6..a314646c 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -68,8 +68,10 @@ cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -155,14 +157,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -179,7 +181,8 @@ error:
}
void *
-cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_verify_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -199,7 +202,12 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -212,7 +220,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -240,7 +248,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -266,9 +274,11 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = test_vector->cipher_iv.length +
+ test_vector->auth_iv.length + test_vector->aead_iv.length;
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -373,7 +383,7 @@ cperf_verify_op(struct rte_crypto_op *op,
if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
res += memcmp(data + auth_offset,
vector->digest.data,
- options->auth_digest_sz);
+ options->digest_sz);
}
return !!res;
@@ -417,6 +427,9 @@ cperf_verify_test_runner(void *test_ctx)
printf("\n# Running verify test on device: %u, lcore: %u\n",
ctx->dev_id, lcore);
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
while (ops_enqd_total < ctx->options->total_ops) {
uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
@@ -431,14 +444,20 @@ cperf_verify_test_runner(void *test_ctx)
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
+ ops, ops_needed)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
#ifdef CPERF_LINEARIZATION_ENABLE
if (linearize) {
@@ -575,5 +594,7 @@ cperf_verify_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_verify_test_free(ctx, ctx->options->pool_sz);
}
diff --git a/app/test-crypto-perf/cperf_test_verify.h b/app/test-crypto-perf/cperf_test_verify.h
index 3fa78ee6..e67b48d3 100644
--- a/app/test-crypto-perf/cperf_test_verify.h
+++ b/app/test-crypto-perf/cperf_test_verify.h
@@ -44,7 +44,10 @@
void *
-cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_verify_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/data/aes_cbc_128_sha.data b/app/test-crypto-perf/data/aes_cbc_128_sha.data
index 0b054f5a..ff555903 100644
--- a/app/test-crypto-perf/data/aes_cbc_128_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_128_sha.data
@@ -282,7 +282,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/data/aes_cbc_192_sha.data b/app/test-crypto-perf/data/aes_cbc_192_sha.data
index 7bfe3da7..3f85a004 100644
--- a/app/test-crypto-perf/data/aes_cbc_192_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_192_sha.data
@@ -283,7 +283,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/data/aes_cbc_256_sha.data b/app/test-crypto-perf/data/aes_cbc_256_sha.data
index 52dafb93..8da81611 100644
--- a/app/test-crypto-perf/data/aes_cbc_256_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_256_sha.data
@@ -283,7 +283,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 9ec2a4b4..99f5d3e0 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -1,3 +1,35 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include <stdio.h>
#include <unistd.h>
@@ -11,6 +43,9 @@
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
+#define NUM_SESSIONS 2048
+#define SESS_MEMPOOL_CACHE_SIZE 64
+
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
[CPERF_TEST_TYPE_LATENCY] = "latency",
@@ -44,9 +79,11 @@ const struct cperf_test cperf_testmap[] = {
};
static int
-cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
+cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
+ struct rte_mempool *session_pool_socket[])
{
- uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
+ uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
+ unsigned int i;
int ret;
enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
@@ -66,40 +103,74 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
return -EINVAL;
}
- for (cdev_id = 0; cdev_id < enabled_cdev_count &&
- cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {
+ /* Create a mempool shared by all the devices */
+ uint32_t max_sess_size = 0, sess_size;
+
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_size = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_size > max_sess_size)
+ max_sess_size = sess_size;
+ }
+
+ for (i = 0; i < enabled_cdev_count &&
+ i < RTE_CRYPTO_MAX_DEVS; i++) {
+ cdev_id = enabled_cdevs[i];
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
struct rte_cryptodev_config conf = {
.nb_queue_pairs = 1,
- .socket_id = SOCKET_ID_ANY,
- .session_mp = {
- .nb_objs = 2048,
- .cache_size = 64
- }
- };
+ .socket_id = socket_id
+ };
+
struct rte_cryptodev_qp_conf qp_conf = {
.nb_descriptors = 2048
};
- ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
+
+ if (session_pool_socket[socket_id] == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+
+ sess_mp = rte_mempool_create(mp_name,
+ NUM_SESSIONS,
+ max_sess_size,
+ SESS_MEMPOOL_CACHE_SIZE,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ if (sess_mp == NULL) {
+ printf("Cannot create session pool on socket %d\n",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ printf("Allocated session pool on socket %d\n", socket_id);
+ session_pool_socket[socket_id] = sess_mp;
+ }
+
+ ret = rte_cryptodev_configure(cdev_id, &conf);
if (ret < 0) {
- printf("Failed to configure cryptodev %u",
- enabled_cdevs[cdev_id]);
+ printf("Failed to configure cryptodev %u", cdev_id);
return -EINVAL;
}
- ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
- &qp_conf, SOCKET_ID_ANY);
+ ret = rte_cryptodev_queue_pair_setup(cdev_id, 0,
+ &qp_conf, socket_id,
+ session_pool_socket[socket_id]);
if (ret < 0) {
printf("Failed to setup queue pair %u on "
"cryptodev %u", 0, cdev_id);
return -EINVAL;
}
- ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
+ ret = rte_cryptodev_start(cdev_id);
if (ret < 0) {
printf("Failed to start device %u: error %d\n",
- enabled_cdevs[cdev_id], ret);
+ cdev_id, ret);
return -EPERM;
}
}
@@ -123,8 +194,7 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
cap_idx.algo.auth = opts->auth_algo;
@@ -137,16 +207,15 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
ret = rte_cryptodev_sym_capability_check_auth(
capability,
opts->auth_key_sz,
- opts->auth_digest_sz,
- opts->auth_aad_sz);
+ opts->digest_sz,
+ opts->auth_iv_sz);
if (ret != 0)
return ret;
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cap_idx.algo.cipher = opts->cipher_algo;
@@ -163,6 +232,26 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
if (ret != 0)
return ret;
}
+
+ if (opts->op_type == CPERF_AEAD) {
+
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ cap_idx.algo.aead = opts->aead_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_aead(
+ capability,
+ opts->aead_key_sz,
+ opts->digest_sz,
+ opts->aead_aad_sz,
+ opts->aead_iv_sz);
+ if (ret != 0)
+ return ret;
+ }
}
return 0;
@@ -185,9 +274,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
@@ -204,9 +293,14 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
@@ -226,9 +320,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
@@ -240,9 +334,14 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
} else if (opts->op_type == CPERF_AEAD) {
@@ -254,13 +353,17 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
+ if (test_vec->aead_iv.data == NULL)
+ return -1;
+ if (test_vec->aead_iv.length != opts->aead_iv_sz)
+ return -1;
if (test_vec->aad.data == NULL)
return -1;
- if (test_vec->aad.length != opts->auth_aad_sz)
+ if (test_vec->aad.length != opts->aead_aad_sz)
return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
return 0;
@@ -274,6 +377,7 @@ main(int argc, char **argv)
struct cperf_op_fns op_fns;
void *ctx[RTE_MAX_LCORE] = { };
+ struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
int nb_cryptodevs = 0;
uint8_t cdev_id, i;
@@ -309,7 +413,8 @@ main(int argc, char **argv)
if (!opts.silent)
cperf_options_dump(&opts);
- nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
+ session_pool_socket);
if (nb_cryptodevs < 1) {
RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
"device type\n");
@@ -367,7 +472,10 @@ main(int argc, char **argv)
cdev_id = enabled_cdevs[i];
- ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+
+ ctx[cdev_id] = cperf_testmap[opts.test].constructor(
+ session_pool_socket[socket_id], cdev_id, 0,
&opts, t_vec, &op_fns);
if (ctx[cdev_id] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
@@ -395,7 +503,14 @@ main(int argc, char **argv)
ctx[cdev_id], lcore_id);
i++;
}
- rte_eal_mp_wait_lcore();
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+ rte_eal_wait_lcore(lcore_id);
+ i++;
+ }
/* Get next size from range or list */
if (opts.inc_buffer_size != 0)