Diffstat (limited to 'app')
 app/Makefile                                      |   4
 app/proc_info/main.c                              |   1
 app/test-crypto-perf/cperf.h                      |   5
 app/test-crypto-perf/cperf_ops.c                  | 274
 app/test-crypto-perf/cperf_ops.h                  |   7
 app/test-crypto-perf/cperf_options.h              |  24
 app/test-crypto-perf/cperf_options_parsing.c      | 238
 app/test-crypto-perf/cperf_test_latency.c         | 102
 app/test-crypto-perf/cperf_test_latency.h         |   5
 app/test-crypto-perf/cperf_test_throughput.c      |  60
 app/test-crypto-perf/cperf_test_throughput.h      |   5
 app/test-crypto-perf/cperf_test_vector_parsing.c  |  98
 app/test-crypto-perf/cperf_test_vectors.c         | 172
 app/test-crypto-perf/cperf_test_vectors.h         |  20
 app/test-crypto-perf/cperf_test_verify.c          |  47
 app/test-crypto-perf/cperf_test_verify.h          |   5
 app/test-crypto-perf/data/aes_cbc_128_sha.data    |   2
 app/test-crypto-perf/data/aes_cbc_192_sha.data    |   2
 app/test-crypto-perf/data/aes_cbc_256_sha.data    |   2
 app/test-crypto-perf/main.c                       | 183
 app/test-eventdev/Makefile                        |  54
 app/test-eventdev/evt_common.h                    | 116
 app/test-eventdev/evt_main.c                      | 227
 app/test-eventdev/evt_options.c                   | 341
 app/test-eventdev/evt_options.h                   | 277
 app/test-eventdev/evt_test.c                      |  70
 app/test-eventdev/evt_test.h                      | 125
 app/test-eventdev/parser.c                        | 388
 app/test-eventdev/parser.h                        |  79
 app/test-eventdev/test_order_atq.c                | 232
 app/test-eventdev/test_order_common.c             | 380
 app/test-eventdev/test_order_common.h             | 153
 app/test-eventdev/test_order_queue.c              | 242
 app/test-eventdev/test_perf_atq.c                 | 277
 app/test-eventdev/test_perf_common.c              | 497
 app/test-eventdev/test_perf_common.h              | 169
 app/test-eventdev/test_perf_queue.c               | 288
 app/test-pmd/Makefile                             |   4
 app/test-pmd/cmdline.c                            | 816
 app/test-pmd/cmdline_flow.c                       | 119
 app/test-pmd/config.c                             | 160
 app/test-pmd/csumonly.c                           |   7
 app/test-pmd/flowgen.c                            |   1
 app/test-pmd/iofwd.c                              |   1
 app/test-pmd/macfwd.c                             |   1
 app/test-pmd/macswap.c                            |   1
 app/test-pmd/parameters.c                         |  34
 app/test-pmd/rxonly.c                             |   1
 app/test-pmd/testpmd.c                            | 115
 app/test-pmd/testpmd.h                            |  19
 app/test-pmd/txonly.c                             |   2
 51 files changed, 5869 insertions(+), 583 deletions(-)
diff --git a/app/Makefile b/app/Makefile
index c3aeebf6..7ea02b01 100644
--- a/app/Makefile
+++ b/app/Makefile
@@ -39,4 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
DIRS-$(CONFIG_RTE_APP_CRYPTO_PERF) += test-crypto-perf
endif
+ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
+DIRS-$(CONFIG_RTE_APP_EVENTDEV) += test-eventdev
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index d4f6a823..8b753a2e 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -53,7 +53,6 @@
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_debug.h>
#include <rte_log.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
diff --git a/app/test-crypto-perf/cperf.h b/app/test-crypto-perf/cperf.h
index 293ba940..c9f7f817 100644
--- a/app/test-crypto-perf/cperf.h
+++ b/app/test-crypto-perf/cperf.h
@@ -41,7 +41,10 @@ struct cperf_options;
struct cperf_test_vector;
struct cperf_op_fns;
-typedef void *(*cperf_constructor_t)(uint8_t dev_id, uint16_t qp_id,
+typedef void *(*cperf_constructor_t)(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *t_vec,
const struct cperf_op_fns *op_fns);
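
The extra sess_mp parameter reflects the DPDK 17.08 session rework: symmetric sessions are now allocated from a caller-supplied mempool instead of being created by the device itself, so every test constructor has to receive that pool. A minimal sketch of building such a pool follows; the pool name, session count and helper function are illustrative assumptions, not part of this patch:

#include <rte_cryptodev.h>
#include <rte_mempool.h>

#define NB_SESSIONS 2048

static struct rte_mempool *
create_session_pool(uint8_t dev_id, int socket_id)
{
	/* Each element must hold the generic session header plus the
	 * device's private session data. */
	unsigned int elt_sz = rte_cryptodev_get_header_session_size() +
			rte_cryptodev_get_private_session_size(dev_id);

	return rte_mempool_create("cperf_sess_mp", NB_SESSIONS, elt_sz,
			0, 0, NULL, NULL, NULL, NULL, socket_id, 0);
}
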
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index c2c3db57..88fb9725 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -40,7 +40,8 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector __rte_unused)
+ const struct cperf_test_vector *test_vector __rte_unused,
+ uint16_t iv_offset __rte_unused)
{
uint16_t i;
@@ -65,7 +66,8 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector __rte_unused)
+ const struct cperf_test_vector *test_vector __rte_unused,
+ uint16_t iv_offset __rte_unused)
{
uint16_t i;
@@ -90,7 +92,8 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -103,10 +106,6 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
@@ -117,6 +116,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
sym_op->cipher.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+
+ }
+ }
+
return 0;
}
@@ -125,7 +135,8 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -137,12 +148,19 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
+ if (test_vector->auth_iv.length) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *,
+ iv_offset);
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+
/* authentication parameters */
if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@@ -151,24 +169,19 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
- sym_op->auth.digest.length = options->auth_digest_sz;
- sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
- sym_op->auth.aad.data = test_vector->aad.data;
- sym_op->auth.aad.length = options->auth_aad_sz;
}
@@ -182,6 +195,17 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->auth.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ if (test_vector->auth_iv.length) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+ }
+ }
return 0;
}
@@ -190,7 +214,8 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -203,10 +228,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
@@ -221,7 +242,6 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->test_buffer_size;
@@ -230,24 +250,19 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
- sym_op->auth.digest.length = options->auth_digest_sz;
- sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
- sym_op->auth.aad.data = test_vector->aad.data;
- sym_op->auth.aad.length = options->auth_aad_sz;
}
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -260,6 +275,26 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.data.offset = 0;
}
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+ if (test_vector->auth_iv.length) {
+ /*
+			 * Copy the auth IV right after the cipher IV,
+			 * which itself follows the crypto operation
+ */
+ iv_ptr += test_vector->cipher_iv.length;
+ memcpy(iv_ptr, test_vector->auth_iv.data,
+ test_vector->auth_iv.length);
+ }
+ }
+
+ }
+
return 0;
}
@@ -268,7 +303,8 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
uint16_t i;
@@ -280,68 +316,69 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
- /* cipher parameters */
- sym_op->cipher.iv.data = test_vector->iv.data;
- sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
- sym_op->cipher.iv.length = test_vector->iv.length;
-
- sym_op->cipher.data.length = options->test_buffer_size;
- sym_op->cipher.data.offset =
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
+ /* AEAD parameters */
+ sym_op->aead.data.length = options->test_buffer_size;
+ sym_op->aead.data.offset =
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16);
- sym_op->auth.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
- sym_op->auth.aad.length = options->auth_aad_sz;
+ sym_op->aead.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
- /* authentication parameters */
- if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- sym_op->auth.digest.data = test_vector->digest.data;
- sym_op->auth.digest.phys_addr =
+ if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
+ sym_op->aead.digest.data = test_vector->digest.data;
+ sym_op->aead.digest.phys_addr =
test_vector->digest.phys_addr;
- sym_op->auth.digest.length = options->auth_digest_sz;
} else {
- uint32_t offset = sym_op->cipher.data.length +
- sym_op->cipher.data.offset;
+ uint32_t offset = sym_op->aead.data.length +
+ sym_op->aead.data.offset;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
} else {
- buf = bufs_in[i];
-
- tbuf = buf;
+ tbuf = bufs_in[i];
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ buf = tbuf;
}
- sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
- sym_op->auth.digest.phys_addr =
+ sym_op->aead.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
-
- sym_op->auth.digest.length = options->auth_digest_sz;
}
+ }
- sym_op->auth.data.length = options->test_buffer_size;
- sym_op->auth.data.offset = options->auth_aad_sz;
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ for (i = 0; i < nb_ops; i++) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, iv_offset);
+
+ memcpy(iv_ptr, test_vector->aead_iv.data,
+ test_vector->aead_iv.length);
+ }
}
return 0;
}
static struct rte_cryptodev_sym_session *
-cperf_create_session(uint8_t dev_id,
+cperf_create_session(struct rte_mempool *sess_mp,
+ uint8_t dev_id,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset)
{
struct rte_crypto_sym_xform cipher_xform;
struct rte_crypto_sym_xform auth_xform;
+ struct rte_crypto_sym_xform aead_xform;
struct rte_cryptodev_sym_session *sess = NULL;
+ sess = rte_cryptodev_sym_session_create(sess_mp);
/*
* cipher only
*/
@@ -350,6 +387,7 @@ cperf_create_session(uint8_t dev_id,
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
+ cipher_xform.cipher.iv.offset = iv_offset;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
@@ -357,12 +395,16 @@ cperf_create_session(uint8_t dev_id,
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
+ cipher_xform.cipher.iv.length =
+ test_vector->cipher_iv.length;
} else {
cipher_xform.cipher.key.data = NULL;
cipher_xform.cipher.key.length = 0;
+ cipher_xform.cipher.iv.length = 0;
}
/* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
+ sess_mp);
/*
* auth only
*/
@@ -375,27 +417,26 @@ cperf_create_session(uint8_t dev_id,
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
auth_xform.auth.digest_length =
- options->auth_digest_sz;
- auth_xform.auth.add_auth_data_length =
- options->auth_aad_sz;
+ options->digest_sz;
auth_xform.auth.key.length =
test_vector->auth_key.length;
auth_xform.auth.key.data = test_vector->auth_key.data;
+ auth_xform.auth.iv.length =
+ test_vector->auth_iv.length;
} else {
auth_xform.auth.digest_length = 0;
- auth_xform.auth.add_auth_data_length = 0;
auth_xform.auth.key.length = 0;
auth_xform.auth.key.data = NULL;
+ auth_xform.auth.iv.length = 0;
}
/* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ rte_cryptodev_sym_session_init(dev_id, sess, &auth_xform,
+ sess_mp);
/*
* cipher and auth
*/
} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
- || options->op_type == CPERF_AUTH_THEN_CIPHER
- || options->op_type == CPERF_AEAD) {
-
+ || options->op_type == CPERF_AUTH_THEN_CIPHER) {
/*
* cipher
*/
@@ -403,6 +444,7 @@ cperf_create_session(uint8_t dev_id,
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
+ cipher_xform.cipher.iv.offset = iv_offset;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
@@ -410,9 +452,12 @@ cperf_create_session(uint8_t dev_id,
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
+ cipher_xform.cipher.iv.length =
+ test_vector->cipher_iv.length;
} else {
cipher_xform.cipher.key.data = NULL;
cipher_xform.cipher.key.length = 0;
+ cipher_xform.cipher.iv.length = 0;
}
/*
@@ -425,56 +470,53 @@ cperf_create_session(uint8_t dev_id,
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
- auth_xform.auth.digest_length = options->auth_digest_sz;
- auth_xform.auth.add_auth_data_length =
- options->auth_aad_sz;
- /* auth options for aes gcm */
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
- auth_xform.auth.key.length = 0;
- auth_xform.auth.key.data = NULL;
- } else { /* auth options for others */
- auth_xform.auth.key.length =
+ auth_xform.auth.digest_length = options->digest_sz;
+ auth_xform.auth.iv.length = test_vector->auth_iv.length;
+ auth_xform.auth.key.length =
test_vector->auth_key.length;
- auth_xform.auth.key.data =
- test_vector->auth_key.data;
- }
+ auth_xform.auth.key.data =
+ test_vector->auth_key.data;
} else {
auth_xform.auth.digest_length = 0;
- auth_xform.auth.add_auth_data_length = 0;
auth_xform.auth.key.length = 0;
auth_xform.auth.key.data = NULL;
+ auth_xform.auth.iv.length = 0;
}
- /* create crypto session for aes gcm */
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
- if (options->cipher_op ==
- RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- cipher_xform.next = &auth_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &cipher_xform);
- } else { /* decrypt */
- auth_xform.next = &cipher_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &auth_xform);
- }
- } else { /* create crypto session for other */
- /* cipher then auth */
- if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
- cipher_xform.next = &auth_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &cipher_xform);
- } else { /* auth then cipher */
- auth_xform.next = &cipher_xform;
- /* create crypto session */
- sess = rte_cryptodev_sym_session_create(dev_id,
- &auth_xform);
- }
+ /* cipher then auth */
+ if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ cipher_xform.next = &auth_xform;
+ /* create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &cipher_xform, sess_mp);
+ } else { /* auth then cipher */
+ auth_xform.next = &cipher_xform;
+ /* create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &auth_xform, sess_mp);
}
+ } else { /* options->op_type == CPERF_AEAD */
+ aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ aead_xform.next = NULL;
+ aead_xform.aead.algo = options->aead_algo;
+ aead_xform.aead.op = options->aead_op;
+ aead_xform.aead.iv.offset = iv_offset;
+
+ aead_xform.aead.key.data =
+ test_vector->aead_key.data;
+ aead_xform.aead.key.length =
+ test_vector->aead_key.length;
+ aead_xform.aead.iv.length = test_vector->aead_iv.length;
+
+ aead_xform.aead.digest_length = options->digest_sz;
+ aead_xform.aead.aad_length =
+ options->aead_aad_sz;
+
+ /* Create crypto session */
+ rte_cryptodev_sym_session_init(dev_id,
+ sess, &aead_xform, sess_mp);
}
+
return sess;
}
@@ -486,14 +528,14 @@ cperf_get_op_functions(const struct cperf_options *options,
op_fns->sess_create = cperf_create_session;
- if (options->op_type == CPERF_AEAD
- || options->op_type == CPERF_AUTH_THEN_CIPHER
+ if (options->op_type == CPERF_AEAD) {
+ op_fns->populate_ops = cperf_set_ops_aead;
+ return 0;
+ }
+
+ if (options->op_type == CPERF_AUTH_THEN_CIPHER
|| options->op_type == CPERF_CIPHER_THEN_AUTH) {
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
- op_fns->populate_ops = cperf_set_ops_aead;
- else
- op_fns->populate_ops = cperf_set_ops_cipher_auth;
+ op_fns->populate_ops = cperf_set_ops_cipher_auth;
return 0;
}
if (options->op_type == CPERF_AUTH_ONLY) {
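
Taken together, the cperf_ops.c changes replace the old one-shot rte_cryptodev_sym_session_create(dev_id, xform) with the split 17.08 lifecycle. A minimal sketch of the full cycle as the tests now drive it, with error handling elided and sess_mp, dev_id and xform assumed to be set up already:

struct rte_cryptodev_sym_session *sess;

/* 1. Allocate a device-agnostic session header from the pool. */
sess = rte_cryptodev_sym_session_create(sess_mp);

/* 2. Bind the session to a device, filling in its private data. */
rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp);

/* ... build and enqueue crypto operations referencing sess ... */

/* 3. On teardown, release the private data, then the header. */
rte_cryptodev_sym_session_clear(dev_id, sess);
rte_cryptodev_sym_session_free(sess);
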
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 1b748daf..1f8fa937 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -41,14 +41,17 @@
typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
+ struct rte_mempool *sess_mp,
uint8_t dev_id, const struct cperf_options *options,
- const struct cperf_test_vector *test_vector);
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset);
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
- const struct cperf_test_vector *test_vector);
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset);
struct cperf_op_fns {
cperf_sessions_create_t sess_create;
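
The new iv_offset parameter is how each populate_ops implementation finds the per-operation IV area: IVs now live in private space trailing the crypto operation rather than in shared test-vector memory. A minimal sketch of the copy pattern; set_op_iv is an illustrative helper, not part of the patch:

#include <string.h>

#include <rte_crypto.h>

static void
set_op_iv(struct rte_crypto_op *op, uint16_t iv_offset,
		const uint8_t *iv, uint16_t iv_len)
{
	/* Point iv_offset bytes past the start of the operation,
	 * i.e. into its per-op private data area. */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			iv_offset);

	memcpy(iv_ptr, iv, iv_len);
}

For the throughput and verify tests the offset is sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op); the latency test adds sizeof(struct cperf_op_result *) on top, as its constructor later in this patch shows.
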
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index b928c584..10cd2d8a 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -28,8 +28,16 @@
#define CPERF_AUTH_ALGO ("auth-algo")
#define CPERF_AUTH_OP ("auth-op")
#define CPERF_AUTH_KEY_SZ ("auth-key-sz")
-#define CPERF_AUTH_DIGEST_SZ ("auth-digest-sz")
-#define CPERF_AUTH_AAD_SZ ("auth-aad-sz")
+#define CPERF_AUTH_IV_SZ ("auth-iv-sz")
+
+#define CPERF_AEAD_ALGO ("aead-algo")
+#define CPERF_AEAD_OP ("aead-op")
+#define CPERF_AEAD_KEY_SZ ("aead-key-sz")
+#define CPERF_AEAD_IV_SZ ("aead-iv-sz")
+#define CPERF_AEAD_AAD_SZ ("aead-aad-sz")
+
+#define CPERF_DIGEST_SZ ("digest-sz")
+
#define CPERF_CSV ("csv-friendly")
#define MAX_LIST 32
@@ -76,8 +84,16 @@ struct cperf_options {
enum rte_crypto_auth_operation auth_op;
uint16_t auth_key_sz;
- uint16_t auth_digest_sz;
- uint16_t auth_aad_sz;
+ uint16_t auth_iv_sz;
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ enum rte_crypto_aead_operation aead_op;
+
+ uint16_t aead_key_sz;
+ uint16_t aead_iv_sz;
+ uint16_t aead_aad_sz;
+
+ uint16_t digest_sz;
char device_type[RTE_CRYPTODEV_NAME_LEN];
enum cperf_op_type op_type;
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index d172671f..085aa8fe 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -312,7 +312,7 @@ parse_buffer_sz(struct cperf_options *opts, const char *arg)
&opts->min_buffer_size,
&opts->max_buffer_size);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+ RTE_LOG(ERR, USER1, "failed to parse buffer size/s\n");
return -1;
}
opts->buffer_size_count = ret;
@@ -543,15 +543,76 @@ parse_auth_key_sz(struct cperf_options *opts, const char *arg)
}
static int
-parse_auth_digest_sz(struct cperf_options *opts, const char *arg)
+parse_digest_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->digest_sz, arg);
+}
+
+static int
+parse_auth_iv_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_iv_sz, arg);
+}
+
+static int
+parse_aead_algo(struct cperf_options *opts, const char *arg)
+{
+ enum rte_crypto_aead_algorithm aead_algo;
+
+ if (rte_cryptodev_get_aead_algo_enum(&aead_algo, arg) < 0) {
+ RTE_LOG(ERR, USER1, "Invalid AEAD algorithm specified\n");
+ return -1;
+ }
+
+ opts->aead_algo = aead_algo;
+
+ return 0;
+}
+
+static int
+parse_aead_op(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map aead_op_namemap[] = {
+ {
+ rte_crypto_aead_operation_strings
+ [RTE_CRYPTO_AEAD_OP_ENCRYPT],
+ RTE_CRYPTO_AEAD_OP_ENCRYPT },
+ {
+ rte_crypto_aead_operation_strings
+ [RTE_CRYPTO_AEAD_OP_DECRYPT],
+ RTE_CRYPTO_AEAD_OP_DECRYPT
+ }
+ };
+
+ int id = get_str_key_id_mapping(aead_op_namemap,
+ RTE_DIM(aead_op_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid AEAD operation specified"
+ "\n");
+ return -1;
+ }
+
+ opts->aead_op = (enum rte_crypto_aead_operation)id;
+
+ return 0;
+}
+
+static int
+parse_aead_key_sz(struct cperf_options *opts, const char *arg)
{
- return parse_uint16_t(&opts->auth_digest_sz, arg);
+ return parse_uint16_t(&opts->aead_key_sz, arg);
}
static int
-parse_auth_aad_sz(struct cperf_options *opts, const char *arg)
+parse_aead_iv_sz(struct cperf_options *opts, const char *arg)
{
- return parse_uint16_t(&opts->auth_aad_sz, arg);
+ return parse_uint16_t(&opts->aead_iv_sz, arg);
+}
+
+static int
+parse_aead_aad_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->aead_aad_sz, arg);
}
static int
@@ -600,8 +661,17 @@ static struct option lgopts[] = {
{ CPERF_AUTH_OP, required_argument, 0, 0 },
{ CPERF_AUTH_KEY_SZ, required_argument, 0, 0 },
- { CPERF_AUTH_DIGEST_SZ, required_argument, 0, 0 },
- { CPERF_AUTH_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_AUTH_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_AEAD_ALGO, required_argument, 0, 0 },
+ { CPERF_AEAD_OP, required_argument, 0, 0 },
+
+ { CPERF_AEAD_KEY_SZ, required_argument, 0, 0 },
+ { CPERF_AEAD_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_AEAD_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_DIGEST_SZ, required_argument, 0, 0 },
+
{ CPERF_CSV, no_argument, 0, 0},
{ NULL, 0, 0, 0 }
@@ -650,8 +720,13 @@ cperf_options_default(struct cperf_options *opts)
opts->auth_op = RTE_CRYPTO_AUTH_OP_GENERATE;
opts->auth_key_sz = 64;
- opts->auth_digest_sz = 12;
- opts->auth_aad_sz = 0;
+ opts->auth_iv_sz = 0;
+
+ opts->aead_key_sz = 0;
+ opts->aead_iv_sz = 0;
+ opts->aead_aad_sz = 0;
+
+ opts->digest_sz = 12;
}
static int
@@ -678,9 +753,14 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_AUTH_ALGO, parse_auth_algo },
{ CPERF_AUTH_OP, parse_auth_op },
{ CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
- { CPERF_AUTH_DIGEST_SZ, parse_auth_digest_sz },
- { CPERF_AUTH_AAD_SZ, parse_auth_aad_sz },
- { CPERF_CSV, parse_csv_friendly},
+ { CPERF_AUTH_IV_SZ, parse_auth_iv_sz },
+ { CPERF_AEAD_ALGO, parse_aead_algo },
+ { CPERF_AEAD_OP, parse_aead_op },
+ { CPERF_AEAD_KEY_SZ, parse_aead_key_sz },
+ { CPERF_AEAD_IV_SZ, parse_aead_iv_sz },
+ { CPERF_AEAD_AAD_SZ, parse_aead_aad_sz },
+ { CPERF_DIGEST_SZ, parse_digest_sz },
+ { CPERF_CSV, parse_csv_friendly},
};
unsigned int i;
@@ -717,11 +797,56 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
return 0;
}
-int
-cperf_options_check(struct cperf_options *options)
+static int
+check_cipher_buffer_length(struct cperf_options *options)
{
uint32_t buffer_size, buffer_size_idx = 0;
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
+ if (options->inc_buffer_size != 0)
+ buffer_size = options->min_buffer_size;
+ else
+ buffer_size = options->buffer_size_list[0];
+
+ while (buffer_size <= options->max_buffer_size) {
+ if ((buffer_size % AES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+
+ if (options->inc_buffer_size != 0)
+ buffer_size += options->inc_buffer_size;
+ else {
+ if (++buffer_size_idx == options->buffer_size_count)
+ break;
+ buffer_size = options->buffer_size_list[buffer_size_idx];
+ }
+
+ }
+ }
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
+ for (buffer_size = options->min_buffer_size;
+ buffer_size < options->max_buffer_size;
+ buffer_size += options->inc_buffer_size) {
+ if ((buffer_size % DES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+cperf_options_check(struct cperf_options *options)
+{
if (options->segments_nb > options->min_buffer_size) {
RTE_LOG(ERR, USER1,
"Segments number greater than buffer size.\n");
@@ -795,68 +920,13 @@ cperf_options_check(struct cperf_options *options)
" options: decrypt and verify.\n");
return -EINVAL;
}
- } else if (options->op_type == CPERF_AEAD) {
- if (!(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- options->auth_op ==
- RTE_CRYPTO_AUTH_OP_GENERATE) &&
- !(options->cipher_op ==
- RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- options->auth_op ==
- RTE_CRYPTO_AUTH_OP_VERIFY)) {
- RTE_LOG(ERR, USER1, "Use together options: encrypt and"
- " generate or decrypt and verify.\n");
- return -EINVAL;
- }
}
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_CCM ||
- options->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (options->op_type != CPERF_AEAD) {
- RTE_LOG(ERR, USER1, "Use --optype aead\n");
+ if (options->op_type == CPERF_CIPHER_ONLY ||
+ options->op_type == CPERF_CIPHER_THEN_AUTH ||
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (check_cipher_buffer_length(options) < 0)
return -EINVAL;
- }
- }
-
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
- if (options->inc_buffer_size != 0)
- buffer_size = options->min_buffer_size;
- else
- buffer_size = options->buffer_size_list[0];
-
- while (buffer_size <= options->max_buffer_size) {
- if ((buffer_size % AES_BLOCK_SIZE) != 0) {
- RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
- "not suitable for the algorithm selected\n");
- return -EINVAL;
- }
-
- if (options->inc_buffer_size != 0)
- buffer_size += options->inc_buffer_size;
- else {
- if (++buffer_size_idx == options->buffer_size_count)
- break;
- buffer_size = options->buffer_size_list[buffer_size_idx];
- }
-
- }
- }
-
- if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
- options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
- for (buffer_size = options->min_buffer_size;
- buffer_size < options->max_buffer_size;
- buffer_size += options->inc_buffer_size) {
- if ((buffer_size % DES_BLOCK_SIZE) != 0) {
- RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
- "not suitable for the algorithm selected\n");
- return -EINVAL;
- }
- }
}
return 0;
@@ -907,22 +977,20 @@ cperf_options_dump(struct cperf_options *opts)
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
printf("# auth algorithm: %s\n",
rte_crypto_auth_algorithm_strings[opts->auth_algo]);
printf("# auth operation: %s\n",
rte_crypto_auth_operation_strings[opts->auth_op]);
printf("# auth key size: %u\n", opts->auth_key_sz);
- printf("# auth digest size: %u\n", opts->auth_digest_sz);
- printf("# auth aad size: %u\n", opts->auth_aad_sz);
+ printf("# auth iv size: %u\n", opts->auth_iv_sz);
+ printf("# auth digest size: %u\n", opts->digest_sz);
printf("#\n");
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
printf("# cipher algorithm: %s\n",
rte_crypto_cipher_algorithm_strings[opts->cipher_algo]);
printf("# cipher operation: %s\n",
@@ -931,4 +999,16 @@ cperf_options_dump(struct cperf_options *opts)
printf("# cipher iv size: %u\n", opts->cipher_iv_sz);
printf("#\n");
}
+
+ if (opts->op_type == CPERF_AEAD) {
+ printf("# aead algorithm: %s\n",
+ rte_crypto_aead_algorithm_strings[opts->aead_algo]);
+ printf("# aead operation: %s\n",
+ rte_crypto_aead_operation_strings[opts->aead_op]);
+ printf("# aead key size: %u\n", opts->aead_key_sz);
+ printf("# aead iv size: %u\n", opts->aead_iv_sz);
+ printf("# aead digest size: %u\n", opts->digest_sz);
+ printf("# aead aad size: %u\n", opts->aead_aad_sz);
+ printf("#\n");
+ }
}
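
With the renamed switches, an AEAD run is configured entirely through the new --aead-* family plus the shared --digest-sz. A hypothetical invocation (the binary name and the existing --ptest/--optype switches are assumed from the rest of the app, not introduced here):

./dpdk-test-crypto-perf -l 0-1 -- --ptest throughput --optype aead \
	--aead-algo aes-gcm --aead-op encrypt --aead-key-sz 16 \
	--aead-iv-sz 12 --aead-aad-sz 16 --digest-sz 16
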
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index e61ac972..58b21abd 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -66,6 +66,10 @@ struct cperf_latency_ctx {
struct cperf_op_result *res;
};
+struct priv_op_data {
+ struct cperf_op_result *result;
+};
+
#define max(a, b) (a > b ? (uint64_t)a : (uint64_t)b)
#define min(a, b) (a < b ? (uint64_t)a : (uint64_t)b)
@@ -75,8 +79,10 @@ cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -163,14 +169,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -187,7 +193,8 @@ error:
}
void *
-cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_latency_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -207,7 +214,13 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -220,7 +233,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -250,7 +263,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -276,9 +289,14 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = sizeof(struct priv_op_data) +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ test_vector->aead_iv.length;
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
+
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -295,11 +313,20 @@ err:
return NULL;
}
+static inline void
+store_timestamp(struct rte_crypto_op *op, uint64_t timestamp)
+{
+ struct priv_op_data *priv_data;
+
+ priv_data = (struct priv_op_data *) (op->sym + 1);
+ priv_data->result->status = op->status;
+ priv_data->result->tsc_end = timestamp;
+}
+
int
cperf_latency_test_runner(void *arg)
{
struct cperf_latency_ctx *ctx = arg;
- struct cperf_op_result *pres;
uint16_t test_burst_size;
uint8_t burst_size_idx = 0;
@@ -311,6 +338,7 @@ cperf_latency_test_runner(void *arg)
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
uint64_t i;
+ struct priv_op_data *priv_data;
uint32_t lcore = rte_lcore_id();
@@ -339,6 +367,10 @@ cperf_latency_test_runner(void *arg)
else
test_burst_size = ctx->options->burst_size_list[0];
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
uint64_t m_idx = 0, b_idx = 0;
@@ -360,14 +392,20 @@ cperf_latency_test_runner(void *arg)
if (burst_size != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size))
+ ops, burst_size)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
burst_size, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
tsc_start = rte_rdtsc_precise();
@@ -393,12 +431,19 @@ cperf_latency_test_runner(void *arg)
tsc_end = rte_rdtsc_precise();
/* Free memory for not enqueued operations */
- for (i = ops_enqd; i < burst_size; i++)
- rte_crypto_op_free(ops[i]);
+ if (ops_enqd != burst_size)
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)&ops[ops_enqd],
+ burst_size - ops_enqd);
for (i = 0; i < ops_enqd; i++) {
ctx->res[tsc_idx].tsc_start = tsc_start;
- ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+ /*
+ * Private data structure starts after the end of the
+ * rte_crypto_sym_op structure.
+ */
+ priv_data = (struct priv_op_data *) (ops[i]->sym + 1);
+ priv_data->result = (void *)&ctx->res[tsc_idx];
tsc_idx++;
}
@@ -409,14 +454,11 @@ cperf_latency_test_runner(void *arg)
* the crypto operation will change the data and cause
* failures.
*/
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
+ for (i = 0; i < ops_deqd; i++)
+ store_timestamp(ops_processed[i], tsc_end);
- rte_crypto_op_free(ops_processed[i]);
- }
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
@@ -445,14 +487,11 @@ cperf_latency_test_runner(void *arg)
tsc_end = rte_rdtsc_precise();
if (ops_deqd != 0) {
- for (i = 0; i < ops_deqd; i++) {
- pres = (struct cperf_op_result *)
- (ops_processed[i]->opaque_data);
- pres->status = ops_processed[i]->status;
- pres->tsc_end = tsc_end;
+ for (i = 0; i < ops_deqd; i++)
+ store_timestamp(ops_processed[i], tsc_end);
- rte_crypto_op_free(ops_processed[i]);
- }
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
@@ -547,6 +586,7 @@ cperf_latency_test_destructor(void *arg)
if (ctx == NULL)
return;
- cperf_latency_test_free(ctx, ctx->options->pool_sz);
+ rte_cryptodev_stop(ctx->dev_id);
+ cperf_latency_test_free(ctx, ctx->options->pool_sz);
}
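
The latency test previously hung its result pointer off op->opaque_data; it now reserves sizeof(struct priv_op_data) plus room for the IVs as per-op private data, so each operation is laid out as: rte_crypto_op, rte_crypto_sym_op, priv_op_data, IV bytes. A minimal sketch of recovering the result on dequeue, mirroring store_timestamp() above (op_result is an illustrative helper):

static struct cperf_op_result *
op_result(struct rte_crypto_op *op)
{
	/* The private area starts immediately after the symmetric op. */
	struct priv_op_data *priv = (struct priv_op_data *)(op->sym + 1);

	return priv->result;
}
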
diff --git a/app/test-crypto-perf/cperf_test_latency.h b/app/test-crypto-perf/cperf_test_latency.h
index 6a2cf610..1bbedb4e 100644
--- a/app/test-crypto-perf/cperf_test_latency.h
+++ b/app/test-crypto-perf/cperf_test_latency.h
@@ -43,7 +43,10 @@
#include "cperf_test_vectors.h"
void *
-cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_latency_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 61b27ea5..3bb1cb05 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -64,8 +64,10 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -151,14 +153,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -175,7 +177,8 @@ error:
}
void *
-cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -195,7 +198,12 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -208,7 +216,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -236,7 +244,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -262,9 +270,12 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = test_vector->cipher_iv.length +
+ test_vector->auth_iv.length + test_vector->aead_iv.length;
+
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -315,6 +326,9 @@ cperf_throughput_test_runner(void *test_ctx)
else
test_burst_size = ctx->options->burst_size_list[0];
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
@@ -339,14 +353,20 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
+ ops, ops_needed)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
/**
* When ops_needed is smaller than ops_enqd, the
@@ -396,8 +416,8 @@ cperf_throughput_test_runner(void *test_ctx)
* the crypto operation will change the data and cause
* failures.
*/
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
} else {
@@ -426,8 +446,8 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_deqd == 0)
ops_deqd_failed++;
else {
- for (i = 0; i < ops_deqd; i++)
- rte_crypto_op_free(ops_processed[i]);
+ rte_mempool_put_bulk(ctx->crypto_op_pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@@ -471,14 +491,14 @@ cperf_throughput_test_runner(void *test_ctx)
cycles_per_packet);
} else {
if (!only_once)
- printf("# lcore id, Buffer Size(B),"
+ printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
"Cycles/Buf\n\n");
only_once = 1;
- printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
- "%.f3;%.f3;%.f3\n",
+ printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%.3f;%.3f;%.3f\n",
ctx->lcore_id,
ctx->options->test_buffer_size,
test_burst_size,
@@ -514,5 +534,7 @@ cperf_throughput_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_throughput_test_free(ctx, ctx->options->pool_sz);
}
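
Releasing dequeued operations one at a time with rte_crypto_op_free() is replaced by a single rte_mempool_put_bulk(), saving per-op mempool round trips in the hot loop. A minimal sketch of the resulting alloc/release pattern, assuming every operation comes from the same pool and that op_pool, ops_processed and nb_deqd exist in the surrounding code:

struct rte_crypto_op *ops[32];
uint16_t nb = 32;

if (rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		ops, nb) != nb)
	rte_exit(EXIT_FAILURE,
		"op pool exhausted, consider a larger --pool-sz\n");

/* ... populate, enqueue, then dequeue nb_deqd ops into ops_processed ... */

rte_mempool_put_bulk(op_pool, (void **)ops_processed, nb_deqd);
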
diff --git a/app/test-crypto-perf/cperf_test_throughput.h b/app/test-crypto-perf/cperf_test_throughput.h
index f1b5766c..987d0c31 100644
--- a/app/test-crypto-perf/cperf_test_throughput.h
+++ b/app/test-crypto-perf/cperf_test_throughput.h
@@ -44,7 +44,10 @@
void *
-cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_throughput_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index f384e3d9..148a6041 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -1,3 +1,34 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
#ifdef RTE_EXEC_ENV_BSDAPP
#define _WITH_GETLINE
#endif
@@ -15,7 +46,8 @@ free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts)
if (vector == NULL || opts == NULL)
return -1;
- rte_free(vector->iv.data);
+ rte_free(vector->cipher_iv.data);
+ rte_free(vector->auth_iv.data);
rte_free(vector->aad.data);
rte_free(vector->digest.data);
@@ -84,15 +116,28 @@ show_test_vector(struct cperf_test_vector *test_vector)
printf("\n");
}
- if (test_vector->iv.data) {
- printf("\niv =\n");
- for (i = 0; i < test_vector->iv.length; ++i) {
+ if (test_vector->cipher_iv.data) {
+ printf("\ncipher_iv =\n");
+ for (i = 0; i < test_vector->cipher_iv.length; ++i) {
if ((i % wrap == 0) && (i != 0))
printf("\n");
- if (i == (uint32_t)(test_vector->iv.length - 1))
- printf("0x%02x", test_vector->iv.data[i]);
+ if (i == (uint32_t)(test_vector->cipher_iv.length - 1))
+ printf("0x%02x", test_vector->cipher_iv.data[i]);
else
- printf("0x%02x, ", test_vector->iv.data[i]);
+ printf("0x%02x, ", test_vector->cipher_iv.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->auth_iv.data) {
+ printf("\nauth_iv =\n");
+ for (i = 0; i < test_vector->auth_iv.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->auth_iv.length - 1))
+ printf("0x%02x", test_vector->auth_iv.data[i]);
+ else
+ printf("0x%02x, ", test_vector->auth_iv.data[i]);
}
printf("\n");
}
@@ -300,19 +345,32 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
vector->auth_key.length = opts->auth_key_sz;
}
- } else if (strstr(key_token, "iv")) {
- rte_free(vector->iv.data);
- vector->iv.data = data;
- vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
+ } else if (strstr(key_token, "cipher_iv")) {
+ rte_free(vector->cipher_iv.data);
+ vector->cipher_iv.data = data;
if (tc_found)
- vector->iv.length = data_length;
+ vector->cipher_iv.length = data_length;
else {
if (opts->cipher_iv_sz > data_length) {
- printf("Global iv shorter than "
+ printf("Global cipher iv shorter than "
"cipher_iv_sz\n");
return -1;
}
- vector->iv.length = opts->cipher_iv_sz;
+ vector->cipher_iv.length = opts->cipher_iv_sz;
+ }
+
+ } else if (strstr(key_token, "auth_iv")) {
+ rte_free(vector->auth_iv.data);
+ vector->auth_iv.data = data;
+ if (tc_found)
+ vector->auth_iv.length = data_length;
+ else {
+ if (opts->auth_iv_sz > data_length) {
+ printf("Global auth iv shorter than "
+ "auth_iv_sz\n");
+ return -1;
+ }
+ vector->auth_iv.length = opts->auth_iv_sz;
}
} else if (strstr(key_token, "ciphertext")) {
@@ -336,12 +394,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
if (tc_found)
vector->aad.length = data_length;
else {
- if (opts->auth_aad_sz > data_length) {
+ if (opts->aead_aad_sz > data_length) {
printf("Global aad shorter than "
- "auth_aad_sz\n");
+ "aead_aad_sz\n");
return -1;
}
- vector->aad.length = opts->auth_aad_sz;
+ vector->aad.length = opts->aead_aad_sz;
}
} else if (strstr(key_token, "digest")) {
@@ -352,12 +410,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
if (tc_found)
vector->digest.length = data_length;
else {
- if (opts->auth_digest_sz > data_length) {
+ if (opts->digest_sz > data_length) {
printf("Global digest shorter than "
- "auth_digest_sz\n");
+ "digest_sz\n");
return -1;
}
- vector->digest.length = opts->auth_digest_sz;
+ vector->digest.length = opts->digest_sz;
}
} else {
printf("Not valid key: '%s'\n", trim_space(key_token));
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 757957f7..e51dcc3f 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -1,3 +1,35 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include <rte_crypto.h>
#include <rte_malloc.h>
@@ -385,6 +417,13 @@ uint8_t auth_key[] = {
0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F
};
+/* AEAD key */
+uint8_t aead_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+
/* Digests */
uint8_t digest[2048] = { 0x00 };
@@ -403,98 +442,127 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
if (options->op_type == CPERF_CIPHER_ONLY ||
options->op_type == CPERF_CIPHER_THEN_AUTH ||
- options->op_type == CPERF_AUTH_THEN_CIPHER ||
- options->op_type == CPERF_AEAD) {
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
t_vec->cipher_key.length = 0;
t_vec->ciphertext.data = plaintext;
t_vec->cipher_key.data = NULL;
- t_vec->iv.data = NULL;
+ t_vec->cipher_iv.data = NULL;
} else {
t_vec->cipher_key.length = options->cipher_key_sz;
t_vec->ciphertext.data = ciphertext;
t_vec->cipher_key.data = cipher_key;
- t_vec->iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
16);
- if (t_vec->iv.data == NULL) {
+ if (t_vec->cipher_iv.data == NULL) {
rte_free(t_vec);
return NULL;
}
- memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
+ memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
}
t_vec->ciphertext.length = options->max_buffer_size;
- t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
- t_vec->iv.length = options->cipher_iv_sz;
+
+ /* Set IV parameters */
+ t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ 16);
+ if (options->cipher_iv_sz && t_vec->cipher_iv.data == NULL) {
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
+ t_vec->cipher_iv.length = options->cipher_iv_sz;
+
t_vec->data.cipher_offset = 0;
t_vec->data.cipher_length = options->max_buffer_size;
+
}
if (options->op_type == CPERF_AUTH_ONLY ||
options->op_type == CPERF_CIPHER_THEN_AUTH ||
- options->op_type == CPERF_AUTH_THEN_CIPHER ||
- options->op_type == CPERF_AEAD) {
- uint8_t aad_alloc = 0;
-
- t_vec->auth_key.length = options->auth_key_sz;
-
- switch (options->auth_algo) {
- case RTE_CRYPTO_AUTH_NULL:
- t_vec->auth_key.data = NULL;
- aad_alloc = 0;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
+ options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (options->auth_algo == RTE_CRYPTO_AUTH_NULL) {
+ t_vec->auth_key.length = 0;
t_vec->auth_key.data = NULL;
- aad_alloc = 1;
- break;
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- t_vec->auth_key.data = auth_key;
- aad_alloc = 1;
- break;
- case RTE_CRYPTO_AUTH_AES_GMAC:
- /* auth key should be the same as cipher key */
- t_vec->auth_key.data = cipher_key;
- aad_alloc = 1;
- break;
- default:
+ t_vec->digest.data = NULL;
+ t_vec->digest.length = 0;
+ } else {
+ t_vec->auth_key.length = options->auth_key_sz;
t_vec->auth_key.data = auth_key;
- aad_alloc = 0;
- break;
+
+ t_vec->digest.data = rte_malloc(NULL,
+ options->digest_sz,
+ 16);
+ if (t_vec->digest.data == NULL) {
+ rte_free(t_vec->cipher_iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ t_vec->digest.phys_addr =
+ rte_malloc_virt2phy(t_vec->digest.data);
+ t_vec->digest.length = options->digest_sz;
+ memcpy(t_vec->digest.data, digest,
+ options->digest_sz);
}
+ t_vec->data.auth_offset = 0;
+ t_vec->data.auth_length = options->max_buffer_size;
+
+ /* Set IV parameters */
+ t_vec->auth_iv.data = rte_malloc(NULL, options->auth_iv_sz,
+ 16);
+ if (options->auth_iv_sz && t_vec->auth_iv.data == NULL) {
+ if (options->op_type != CPERF_AUTH_ONLY)
+ rte_free(t_vec->cipher_iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->auth_iv.data, iv, options->auth_iv_sz);
+ t_vec->auth_iv.length = options->auth_iv_sz;
+ }
+
+ if (options->op_type == CPERF_AEAD) {
+ t_vec->aead_key.length = options->aead_key_sz;
+ t_vec->aead_key.data = aead_key;
- if (aad_alloc && options->auth_aad_sz) {
+ if (options->aead_aad_sz) {
t_vec->aad.data = rte_malloc(NULL,
- options->auth_aad_sz, 16);
+ options->aead_aad_sz, 16);
if (t_vec->aad.data == NULL) {
- if (options->op_type != CPERF_AUTH_ONLY)
- rte_free(t_vec->iv.data);
rte_free(t_vec);
return NULL;
}
- memcpy(t_vec->aad.data, aad, options->auth_aad_sz);
+ memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
+ t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.length = options->aead_aad_sz;
} else {
t_vec->aad.data = NULL;
+ t_vec->aad.length = 0;
}
- t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
- t_vec->aad.length = options->auth_aad_sz;
- t_vec->digest.data = rte_malloc(NULL, options->auth_digest_sz,
- 16);
+ t_vec->digest.data = rte_malloc(NULL, options->digest_sz,
+ 16);
if (t_vec->digest.data == NULL) {
- if (options->op_type != CPERF_AUTH_ONLY)
- rte_free(t_vec->iv.data);
rte_free(t_vec->aad.data);
rte_free(t_vec);
return NULL;
}
t_vec->digest.phys_addr =
rte_malloc_virt2phy(t_vec->digest.data);
- t_vec->digest.length = options->auth_digest_sz;
- memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
- t_vec->data.auth_offset = 0;
- t_vec->data.auth_length = options->max_buffer_size;
- }
+ t_vec->digest.length = options->digest_sz;
+ memcpy(t_vec->digest.data, digest, options->digest_sz);
+ t_vec->data.aead_offset = 0;
+ t_vec->data.aead_length = options->max_buffer_size;
+ /* Set IV parameters */
+ t_vec->aead_iv.data = rte_malloc(NULL, options->aead_iv_sz,
+ 16);
+ if (options->aead_iv_sz && t_vec->aead_iv.data == NULL) {
+ rte_free(t_vec->aad.data);
+ rte_free(t_vec->digest.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->aead_iv.data, iv, options->aead_iv_sz);
+ t_vec->aead_iv.length = options->aead_iv_sz;
+ }
return t_vec;
}
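
The allocate/check/copy sequence above now repeats for the cipher, auth and AEAD IVs. A minimal helper capturing the pattern could look as follows; alloc_iv_field() is a hypothetical name for illustration and is not part of the patch:

	/* Allocate one IV field of the test vector, 16-byte aligned as above.
	 * A zero-sized field is tolerated, matching the patch: rte_malloc()
	 * returns NULL for size 0 and the copy is skipped.
	 */
	static int
	alloc_iv_field(uint8_t **data, uint16_t *length,
			const uint8_t *src, uint16_t sz)
	{
		*data = rte_malloc(NULL, sz, 16);
		if (sz && *data == NULL)
			return -1;
		if (sz)
			memcpy(*data, src, sz);
		*length = sz;
		return 0;
	}
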
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
index e64f1168..85955703 100644
--- a/app/test-crypto-perf/cperf_test_vectors.h
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -53,9 +53,23 @@ struct cperf_test_vector {
struct {
uint8_t *data;
- phys_addr_t phys_addr;
uint16_t length;
- } iv;
+ } aead_key;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } cipher_iv;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } auth_iv;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } aead_iv;
struct {
uint8_t *data;
@@ -79,6 +93,8 @@ struct cperf_test_vector {
uint32_t auth_length;
uint32_t cipher_offset;
uint32_t cipher_length;
+ uint32_t aead_offset;
+ uint32_t aead_length;
} data;
};
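
Splitting the old iv field into cipher_iv, auth_iv and aead_iv lets each test size the crypto op private area from exactly the IVs in play. A condensed sketch of the resulting pool creation, where nb_ops and the pool name are placeholders (compare the verify test below):

	uint16_t priv_size = t_vec->cipher_iv.length +
			t_vec->auth_iv.length + t_vec->aead_iv.length;

	struct rte_mempool *pool = rte_crypto_op_pool_create("op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, nb_ops,
			512 /* cache */, priv_size, rte_socket_id());
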
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 454221e6..a314646c 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -68,8 +68,10 @@ cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
uint32_t i;
if (ctx) {
- if (ctx->sess)
- rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
@@ -155,14 +157,14 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (options->op_type != CPERF_CIPHER_ONLY) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->auth_digest_sz);
+ options->digest_sz);
if (mbuf_data == NULL)
goto error;
}
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+ RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
if (aead == NULL)
goto error;
@@ -179,7 +181,8 @@ error:
}
void *
-cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_verify_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
@@ -199,7 +202,12 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
ctx->options = options;
ctx->test_vector = test_vector;
- ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(sess_mp, dev_id, options, test_vector,
+ iv_offset);
if (ctx->sess == NULL)
goto err;
@@ -212,7 +220,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_CACHE_LINE_ROUNDUP(
(options->max_buffer_size / options->segments_nb) +
(options->max_buffer_size % options->segments_nb) +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -240,7 +248,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->max_buffer_size +
- options->auth_digest_sz),
+ options->digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -266,9 +274,11 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
+ uint16_t priv_size = test_vector->cipher_iv.length +
+ test_vector->auth_iv.length + test_vector->aead_iv.length;
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
- rte_socket_id());
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
+ 512, priv_size, rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
@@ -373,7 +383,7 @@ cperf_verify_op(struct rte_crypto_op *op,
if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
res += memcmp(data + auth_offset,
vector->digest.data,
- options->auth_digest_sz);
+ options->digest_sz);
}
return !!res;
@@ -417,6 +427,9 @@ cperf_verify_test_runner(void *test_ctx)
printf("\n# Running verify test on device: %u, lcore: %u\n",
ctx->dev_id, lcore);
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
while (ops_enqd_total < ctx->options->total_ops) {
uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
@@ -431,14 +444,20 @@ cperf_verify_test_runner(void *test_ctx)
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed))
+ ops, ops_needed)) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
return -1;
+ }
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
- ctx->test_vector);
+ ctx->test_vector, iv_offset);
#ifdef CPERF_LINEARIZATION_ENABLE
if (linearize) {
@@ -575,5 +594,7 @@ cperf_verify_test_destructor(void *arg)
if (ctx == NULL)
return;
+ rte_cryptodev_stop(ctx->dev_id);
+
cperf_verify_test_free(ctx, ctx->options->pool_sz);
}
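
The iv_offset computed above places each IV immediately after the fixed op headers, inside the per-operation private area sized via priv_size at pool creation. A sketch of how a populate_ops callback can then address it, assuming op and test_vector are the callback's parameters:

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op);
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			iv_offset);

	memcpy(iv_ptr, test_vector->cipher_iv.data,
			test_vector->cipher_iv.length);
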
diff --git a/app/test-crypto-perf/cperf_test_verify.h b/app/test-crypto-perf/cperf_test_verify.h
index 3fa78ee6..e67b48d3 100644
--- a/app/test-crypto-perf/cperf_test_verify.h
+++ b/app/test-crypto-perf/cperf_test_verify.h
@@ -44,7 +44,10 @@
void *
-cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+cperf_verify_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *ops_fn);
diff --git a/app/test-crypto-perf/data/aes_cbc_128_sha.data b/app/test-crypto-perf/data/aes_cbc_128_sha.data
index 0b054f5a..ff555903 100644
--- a/app/test-crypto-perf/data/aes_cbc_128_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_128_sha.data
@@ -282,7 +282,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/data/aes_cbc_192_sha.data b/app/test-crypto-perf/data/aes_cbc_192_sha.data
index 7bfe3da7..3f85a004 100644
--- a/app/test-crypto-perf/data/aes_cbc_192_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_192_sha.data
@@ -283,7 +283,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/data/aes_cbc_256_sha.data b/app/test-crypto-perf/data/aes_cbc_256_sha.data
index 52dafb93..8da81611 100644
--- a/app/test-crypto-perf/data/aes_cbc_256_sha.data
+++ b/app/test-crypto-perf/data/aes_cbc_256_sha.data
@@ -283,7 +283,7 @@ auth_key =
0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
-iv =
+cipher_iv =
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
####################
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 9ec2a4b4..99f5d3e0 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -1,3 +1,35 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include <stdio.h>
#include <unistd.h>
@@ -11,6 +43,9 @@
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
+#define NUM_SESSIONS 2048
+#define SESS_MEMPOOL_CACHE_SIZE 64
+
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
[CPERF_TEST_TYPE_LATENCY] = "latency",
@@ -44,9 +79,11 @@ const struct cperf_test cperf_testmap[] = {
};
static int
-cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
+cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
+ struct rte_mempool *session_pool_socket[])
{
- uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
+ uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
+ unsigned int i;
int ret;
enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
@@ -66,40 +103,74 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
return -EINVAL;
}
- for (cdev_id = 0; cdev_id < enabled_cdev_count &&
- cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {
+ /* Create a mempool shared by all the devices */
+ uint32_t max_sess_size = 0, sess_size;
+
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_size = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_size > max_sess_size)
+ max_sess_size = sess_size;
+ }
+
+ for (i = 0; i < enabled_cdev_count &&
+ i < RTE_CRYPTO_MAX_DEVS; i++) {
+ cdev_id = enabled_cdevs[i];
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
struct rte_cryptodev_config conf = {
.nb_queue_pairs = 1,
- .socket_id = SOCKET_ID_ANY,
- .session_mp = {
- .nb_objs = 2048,
- .cache_size = 64
- }
- };
+ .socket_id = socket_id
+ };
+
struct rte_cryptodev_qp_conf qp_conf = {
.nb_descriptors = 2048
};
- ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
+
+ if (session_pool_socket[socket_id] == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+
+ sess_mp = rte_mempool_create(mp_name,
+ NUM_SESSIONS,
+ max_sess_size,
+ SESS_MEMPOOL_CACHE_SIZE,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ if (sess_mp == NULL) {
+ printf("Cannot create session pool on socket %d\n",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ printf("Allocated session pool on socket %d\n", socket_id);
+ session_pool_socket[socket_id] = sess_mp;
+ }
+
+ ret = rte_cryptodev_configure(cdev_id, &conf);
if (ret < 0) {
- printf("Failed to configure cryptodev %u",
- enabled_cdevs[cdev_id]);
+ printf("Failed to configure cryptodev %u", cdev_id);
return -EINVAL;
}
- ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
- &qp_conf, SOCKET_ID_ANY);
+ ret = rte_cryptodev_queue_pair_setup(cdev_id, 0,
+ &qp_conf, socket_id,
+ session_pool_socket[socket_id]);
if (ret < 0) {
printf("Failed to setup queue pair %u on "
"cryptodev %u", 0, cdev_id);
return -EINVAL;
}
- ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
+ ret = rte_cryptodev_start(cdev_id);
if (ret < 0) {
printf("Failed to start device %u: error %d\n",
- enabled_cdevs[cdev_id], ret);
+ cdev_id, ret);
return -EPERM;
}
}
@@ -123,8 +194,7 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
cap_idx.algo.auth = opts->auth_algo;
@@ -137,16 +207,15 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
ret = rte_cryptodev_sym_capability_check_auth(
capability,
opts->auth_key_sz,
- opts->auth_digest_sz,
- opts->auth_aad_sz);
+ opts->digest_sz,
+ opts->auth_iv_sz);
if (ret != 0)
return ret;
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
- opts->op_type == CPERF_AUTH_THEN_CIPHER ||
- opts->op_type == CPERF_AEAD) {
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cap_idx.algo.cipher = opts->cipher_algo;
@@ -163,6 +232,26 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
if (ret != 0)
return ret;
}
+
+ if (opts->op_type == CPERF_AEAD) {
+
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ cap_idx.algo.aead = opts->aead_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_aead(
+ capability,
+ opts->aead_key_sz,
+ opts->digest_sz,
+ opts->aead_aad_sz,
+ opts->aead_iv_sz);
+ if (ret != 0)
+ return ret;
+ }
}
return 0;
@@ -185,9 +274,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
@@ -204,9 +293,14 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
@@ -226,9 +320,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->iv.data == NULL)
+ if (test_vec->cipher_iv.data == NULL)
return -1;
- if (test_vec->iv.length != opts->cipher_iv_sz)
+ if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
@@ -240,9 +334,14 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
+ if (test_vec->auth_iv.length != opts->auth_iv_sz)
+ return -1;
+ /* Auth IV is only required for some algorithms */
+ if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
+ return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
} else if (opts->op_type == CPERF_AEAD) {
@@ -254,13 +353,17 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
+ if (test_vec->aead_iv.data == NULL)
+ return -1;
+ if (test_vec->aead_iv.length != opts->aead_iv_sz)
+ return -1;
if (test_vec->aad.data == NULL)
return -1;
- if (test_vec->aad.length != opts->auth_aad_sz)
+ if (test_vec->aad.length != opts->aead_aad_sz)
return -1;
if (test_vec->digest.data == NULL)
return -1;
- if (test_vec->digest.length < opts->auth_digest_sz)
+ if (test_vec->digest.length < opts->digest_sz)
return -1;
}
return 0;
@@ -274,6 +377,7 @@ main(int argc, char **argv)
struct cperf_op_fns op_fns;
void *ctx[RTE_MAX_LCORE] = { };
+ struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
int nb_cryptodevs = 0;
uint8_t cdev_id, i;
@@ -309,7 +413,8 @@ main(int argc, char **argv)
if (!opts.silent)
cperf_options_dump(&opts);
- nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
+ session_pool_socket);
if (nb_cryptodevs < 1) {
RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
"device type\n");
@@ -367,7 +472,10 @@ main(int argc, char **argv)
cdev_id = enabled_cdevs[i];
- ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+
+ ctx[cdev_id] = cperf_testmap[opts.test].constructor(
+ session_pool_socket[socket_id], cdev_id, 0,
&opts, t_vec, &op_fns);
if (ctx[cdev_id] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
@@ -395,7 +503,14 @@ main(int argc, char **argv)
ctx[cdev_id], lcore_id);
i++;
}
- rte_eal_mp_wait_lcore();
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+ rte_eal_wait_lcore(lcore_id);
+ i++;
+ }
/* Get next size from range or list */
if (opts.inc_buffer_size != 0)
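
With per-socket session pools in place, session creation is a two-step affair under the 17.08 API: allocate the generic session header from the pool, then bind the device-private data to it. A hedged sketch of the create path the constructors now rely on, where xform is whatever transform chain the test built and error paths are condensed:

	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
			sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

This pairs with the clear-then-free teardown shown in cperf_test_verify.c above.
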
diff --git a/app/test-eventdev/Makefile b/app/test-eventdev/Makefile
new file mode 100644
index 00000000..dcb2ac47
--- /dev/null
+++ b/app/test-eventdev/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+APP = dpdk-test-eventdev
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y := evt_main.c
+SRCS-y += evt_options.c
+SRCS-y += evt_test.c
+SRCS-y += parser.c
+
+SRCS-y += test_order_common.c
+SRCS-y += test_order_queue.c
+SRCS-y += test_order_atq.c
+
+SRCS-y += test_perf_common.c
+SRCS-y += test_perf_queue.c
+SRCS-y += test_perf_atq.c
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
new file mode 100644
index 00000000..41020760
--- /dev/null
+++ b/app/test-eventdev/evt_common.h
@@ -0,0 +1,116 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EVT_COMMON_
+#define _EVT_COMMON_
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_eventdev.h>
+
+#define CLNRM "\x1b[0m"
+#define CLRED "\x1b[31m"
+#define CLGRN "\x1b[32m"
+#define CLYEL "\x1b[33m"
+
+#define evt_err(fmt, args...) \
+ fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)
+
+#define evt_info(fmt, args...) \
+ fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)
+
+#define EVT_STR_FMT 20
+
+#define evt_dump(str, fmt, val...) \
+ printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)
+
+#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)
+
+#define evt_dump_end printf("\b}\n")
+
+#define EVT_MAX_STAGES 64
+#define EVT_MAX_PORTS 256
+#define EVT_MAX_QUEUES 256
+
+static inline bool
+evt_has_distributed_sched(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
+ true : false;
+}
+
+static inline bool
+evt_has_burst_mode(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
+ true : false;
+}
+
+
+static inline bool
+evt_has_all_types_queue(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
+ true : false;
+}
+
+static inline uint32_t
+evt_sched_type2queue_cfg(uint8_t sched_type)
+{
+ uint32_t ret;
+
+ switch (sched_type) {
+ case RTE_SCHED_TYPE_ATOMIC:
+ ret = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ ret = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ ret = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ break;
+ default:
+ rte_panic("Invalid sched_type %d\n", sched_type);
+ }
+ return ret;
+}
+
+#endif /* _EVT_COMMON_*/
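
These helpers are meant to be called from a test's cap_check callback; a minimal sketch, assuming struct evt_options from evt_options.h (the particular capability requirement here is hypothetical):

	static bool
	atq_capability_check(struct evt_options *opt)
	{
		/* Bail out early on devices lacking the needed features */
		return evt_has_all_types_queue(opt->dev_id) &&
				evt_has_burst_mode(opt->dev_id);
	}
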
diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
new file mode 100644
index 00000000..1c3a7fae
--- /dev/null
+++ b/app/test-eventdev/evt_main.c
@@ -0,0 +1,227 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_eventdev.h>
+
+#include "evt_options.h"
+#include "evt_test.h"
+
+struct evt_options opt;
+struct evt_test *test;
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\nSignal %d received, preparing to exit...\n",
+ signum);
+ /* request all lcores to exit from the main loop */
+ *(int *)test->test_priv = true;
+ rte_wmb();
+
+ rte_eal_mp_wait_lcore();
+
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+
+ /* exit with the expected status */
+ signal(signum, SIG_DFL);
+ kill(getpid(), signum);
+ }
+}
+
+static inline void
+evt_options_dump_all(struct evt_test *test, struct evt_options *opts)
+{
+ evt_options_dump(opts);
+ if (test->ops.opt_dump)
+ test->ops.opt_dump(opts);
+}
+
+int
+main(int argc, char **argv)
+{
+ uint8_t evdevs;
+ int ret;
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ evdevs = rte_event_dev_count();
+ if (!evdevs)
+ rte_panic("no eventdev devices found\n");
+
+ /* Populate the default values of the options */
+ evt_options_default(&opt);
+
+ /* Parse the command line arguments */
+ ret = evt_options_parse(&opt, argc, argv);
+ if (ret) {
+ evt_err("parsing one or more user options failed");
+ goto error;
+ }
+
+ /* Get struct evt_test *test from name */
+ test = evt_test_get(opt.test_name);
+ if (test == NULL) {
+ evt_err("failed to find requested test: %s", opt.test_name);
+ goto error;
+ }
+
+ if (test->ops.test_result == NULL) {
+ evt_err("%s: ops.test_result not found", opt.test_name);
+ goto error;
+ }
+
+ /* Verify the command line options */
+ if (opt.dev_id >= rte_event_dev_count()) {
+ evt_err("invalid event device %d", opt.dev_id);
+ goto error;
+ }
+ if (test->ops.opt_check) {
+ if (test->ops.opt_check(&opt)) {
+ evt_err("invalid command line argument");
+ evt_options_dump_all(test, &opt);
+ goto error;
+ }
+ }
+
+ /* Check the eventdev capability before proceeding */
+ if (test->ops.cap_check) {
+ if (test->ops.cap_check(&opt) == false) {
+ evt_info("unsupported test: %s", opt.test_name);
+ evt_options_dump_all(test, &opt);
+ ret = EVT_TEST_UNSUPPORTED;
+ goto nocap;
+ }
+ }
+
+ /* Dump the options */
+ if (opt.verbose_level)
+ evt_options_dump_all(test, &opt);
+
+ /* Test specific setup */
+ if (test->ops.test_setup) {
+ if (test->ops.test_setup(test, &opt)) {
+ evt_err("failed to setup test: %s", opt.test_name);
+ goto error;
+
+ }
+ }
+
+ /* Test specific mempool setup */
+ if (test->ops.mempool_setup) {
+ if (test->ops.mempool_setup(test, &opt)) {
+ evt_err("%s: mempool setup failed", opt.test_name);
+ goto test_destroy;
+ }
+ }
+
+ /* Test specific ethdev setup */
+ if (test->ops.ethdev_setup) {
+ if (test->ops.ethdev_setup(test, &opt)) {
+ evt_err("%s: ethdev setup failed", opt.test_name);
+ goto mempool_destroy;
+ }
+ }
+
+ /* Test specific eventdev setup */
+ if (test->ops.eventdev_setup) {
+ if (test->ops.eventdev_setup(test, &opt)) {
+ evt_err("%s: eventdev setup failed", opt.test_name);
+ goto ethdev_destroy;
+ }
+ }
+
+ /* Launch lcores */
+ if (test->ops.launch_lcores) {
+ if (test->ops.launch_lcores(test, &opt)) {
+ evt_err("%s: failed to launch lcores", opt.test_name);
+ goto eventdev_destroy;
+ }
+ }
+
+ rte_eal_mp_wait_lcore();
+
+ /* Print the test result */
+ ret = test->ops.test_result(test, &opt);
+nocap:
+ if (ret == EVT_TEST_SUCCESS) {
+ printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
+ } else if (ret == EVT_TEST_FAILED) {
+ printf("Result: "CLRED"%s"CLNRM"\n", "Failed");
+ return EXIT_FAILURE;
+ } else if (ret == EVT_TEST_UNSUPPORTED) {
+ printf("Result: "CLYEL"%s"CLNRM"\n", "Unsupported");
+ }
+
+ return 0;
+eventdev_destroy:
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ethdev_destroy:
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+mempool_destroy:
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+test_destroy:
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+error:
+ return EXIT_FAILURE;
+}
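
Assuming the software eventdev PMD (event_sw) is built in, a typical invocation of this binary exercising the flow above might be:

	./dpdk-test-eventdev --vdev=event_sw0 -- --test=order_queue \
		--plcores=1 --wlcores=2,3 --nb_flows=64 --nb_pkts=1000000
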
diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
new file mode 100644
index 00000000..65e22f84
--- /dev/null
+++ b/app/test-eventdev/evt_options.c
@@ -0,0 +1,341 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+
+#include "evt_options.h"
+#include "evt_test.h"
+#include "parser.h"
+
+void
+evt_options_default(struct evt_options *opt)
+{
+ memset(opt, 0, sizeof(*opt));
+ opt->verbose_level = 1; /* Enable minimal prints */
+ opt->dev_id = 0;
+ strncpy(opt->test_name, "order_queue", EVT_TEST_NAME_MAX_LEN);
+ opt->nb_flows = 1024;
+ opt->socket_id = SOCKET_ID_ANY;
+ opt->pool_sz = 16 * 1024;
+ opt->wkr_deq_dep = 16;
+ opt->nb_pkts = (1ULL << 26); /* do ~64M packets */
+}
+
+typedef int (*option_parser_t)(struct evt_options *opt,
+ const char *arg);
+
+struct long_opt_parser {
+ const char *lgopt_name;
+ option_parser_t parser_fn;
+};
+
+static int
+evt_parse_nb_flows(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint32(&(opt->nb_flows), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_dev_id(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint8(&(opt->dev_id), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_verbose(struct evt_options *opt, const char *arg)
+{
+ opt->verbose_level = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_fwd_latency(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->fwd_latency = 1;
+ return 0;
+}
+
+static int
+evt_parse_queue_priority(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->q_priority = 1;
+ return 0;
+}
+
+static int
+evt_parse_test_name(struct evt_options *opt, const char *arg)
+{
+ snprintf(opt->test_name, EVT_TEST_NAME_MAX_LEN, "%s", arg);
+ return 0;
+}
+
+static int
+evt_parse_slcore(struct evt_options *opt, const char *arg)
+{
+ opt->slcore = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_socket_id(struct evt_options *opt, const char *arg)
+{
+ opt->socket_id = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_wkr_deq_dep(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint16(&(opt->wkr_deq_dep), arg);
+ return ret;
+}
+
+static int
+evt_parse_nb_pkts(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->nb_pkts), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_pool_sz(struct evt_options *opt, const char *arg)
+{
+ opt->pool_sz = atoi(arg);
+
+ return 0;
+}
+
+static int
+evt_parse_plcores(struct evt_options *opt, const char *corelist)
+{
+ int ret;
+
+ ret = parse_lcores_list(opt->plcores, corelist);
+ if (ret == -E2BIG)
+ evt_err("duplicate lcores in plcores");
+
+ return ret;
+}
+
+static int
+evt_parse_work_lcores(struct evt_options *opt, const char *corelist)
+{
+ int ret;
+
+ ret = parse_lcores_list(opt->wlcores, corelist);
+ if (ret == -E2BIG)
+ evt_err("duplicate lcores in wlcores");
+
+ return ret;
+}
+
+static void
+usage(char *program)
+{
+ printf("usage : %s [EAL options] -- [application options]\n", program);
+ printf("application options:\n");
+ printf("\t--verbose : verbose level\n"
+ "\t--dev : device id of the event device\n"
+ "\t--test : name of the test application to run\n"
+ "\t--socket_id : socket_id of application resources\n"
+ "\t--pool_sz : pool size of the mempool\n"
+ "\t--slcore : lcore id of the scheduler\n"
+ "\t--plcores : list of lcore ids for producers\n"
+ "\t--wlcores : list of lcore ids for workers\n"
+ "\t--stlist : list of scheduled types of the stages\n"
+ "\t--nb_flows : number of flows to produce\n"
+ "\t--nb_pkts : number of packets to produce\n"
+ "\t--worker_deq_depth : dequeue depth of the worker\n"
+ "\t--fwd_latency : perform fwd_latency measurement\n"
+ "\t--queue_priority : enable queue priority\n"
+ );
+ printf("available tests:\n");
+ evt_test_dump_names();
+}
+
+static int
+evt_parse_sched_type_list(struct evt_options *opt, const char *arg)
+{
+ char c;
+ int i = 0, j = -1;
+
+ for (i = 0; i < EVT_MAX_STAGES; i++)
+ opt->sched_type_list[i] = (uint8_t)-1;
+
+ i = 0;
+
+ do {
+ c = arg[++j];
+
+ switch (c) {
+ case 'o':
+ case 'O':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_ORDERED;
+ break;
+ case 'a':
+ case 'A':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_ATOMIC;
+ break;
+ case 'p':
+ case 'P':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_PARALLEL;
+ break;
+ case ',':
+ break;
+ default:
+ if (c != '\0') {
+ evt_err("invalid sched_type %c", c);
+ return -EINVAL;
+ }
+ }
+ } while (c != '\0');
+
+ opt->nb_stages = i;
+ return 0;
+}
+
+static struct option lgopts[] = {
+ { EVT_NB_FLOWS, 1, 0, 0 },
+ { EVT_DEVICE, 1, 0, 0 },
+ { EVT_VERBOSE, 1, 0, 0 },
+ { EVT_TEST, 1, 0, 0 },
+ { EVT_PROD_LCORES, 1, 0, 0 },
+ { EVT_WORK_LCORES, 1, 0, 0 },
+ { EVT_SOCKET_ID, 1, 0, 0 },
+ { EVT_POOL_SZ, 1, 0, 0 },
+ { EVT_NB_PKTS, 1, 0, 0 },
+ { EVT_WKR_DEQ_DEP, 1, 0, 0 },
+ { EVT_SCHED_LCORE, 1, 0, 0 },
+ { EVT_SCHED_TYPE_LIST, 1, 0, 0 },
+ { EVT_FWD_LATENCY, 0, 0, 0 },
+ { EVT_QUEUE_PRIORITY, 0, 0, 0 },
+ { EVT_HELP, 0, 0, 0 },
+ { NULL, 0, 0, 0 }
+};
+
+static int
+evt_opts_parse_long(int opt_idx, struct evt_options *opt)
+{
+ unsigned int i;
+
+ struct long_opt_parser parsermap[] = {
+ { EVT_NB_FLOWS, evt_parse_nb_flows},
+ { EVT_DEVICE, evt_parse_dev_id},
+ { EVT_VERBOSE, evt_parse_verbose},
+ { EVT_TEST, evt_parse_test_name},
+ { EVT_PROD_LCORES, evt_parse_plcores},
+ { EVT_WORK_LCORES, evt_parse_work_lcores},
+ { EVT_SOCKET_ID, evt_parse_socket_id},
+ { EVT_POOL_SZ, evt_parse_pool_sz},
+ { EVT_NB_PKTS, evt_parse_nb_pkts},
+ { EVT_WKR_DEQ_DEP, evt_parse_wkr_deq_dep},
+ { EVT_SCHED_LCORE, evt_parse_slcore},
+ { EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
+ { EVT_FWD_LATENCY, evt_parse_fwd_latency},
+ { EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
+ };
+
+ for (i = 0; i < RTE_DIM(parsermap); i++) {
+ if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
+ strlen(parsermap[i].lgopt_name)) == 0)
+ return parsermap[i].parser_fn(opt, optarg);
+ }
+
+ return -EINVAL;
+}
+
+int
+evt_options_parse(struct evt_options *opt, int argc, char **argv)
+{
+ int opts, retval, opt_idx;
+
+ while ((opts = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
+ switch (opts) {
+ case 0: /* long options */
+ if (!strcmp(lgopts[opt_idx].name, "help")) {
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ }
+
+ retval = evt_opts_parse_long(opt_idx, opt);
+ if (retval != 0)
+ return retval;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void
+evt_options_dump(struct evt_options *opt)
+{
+ int lcore_id;
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ evt_dump("driver", "%s", dev_info.driver_name);
+ evt_dump("test", "%s", opt->test_name);
+ evt_dump("dev", "%d", opt->dev_id);
+ evt_dump("verbose_level", "%d", opt->verbose_level);
+ evt_dump("socket_id", "%d", opt->socket_id);
+ evt_dump("pool_sz", "%d", opt->pool_sz);
+ evt_dump("master lcore", "%d", rte_get_master_lcore());
+ evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts);
+ evt_dump_begin("available lcores");
+ RTE_LCORE_FOREACH(lcore_id)
+ printf("%d ", lcore_id);
+ evt_dump_end;
+ evt_dump_nb_flows(opt);
+ evt_dump_worker_dequeue_depth(opt);
+}
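
For the stage list syntax, a string such as "o,a,p" (case-insensitive) parses into three stages. The sketch below is for exposition only, since the parser is static to this file:

	struct evt_options opt;
	evt_options_default(&opt);
	/* equivalent of passing --stlist=o,a,p on the command line */
	int rc = evt_parse_sched_type_list(&opt, "o,a,p");
	/* rc == 0; opt.nb_stages == 3; opt.sched_type_list holds
	 * RTE_SCHED_TYPE_ORDERED, _ATOMIC, _PARALLEL in that order.
	 */
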
diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h
new file mode 100644
index 00000000..d8a9fdcc
--- /dev/null
+++ b/app/test-eventdev/evt_options.h
@@ -0,0 +1,277 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EVT_OPTIONS_
+#define _EVT_OPTIONS_
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+
+#include "evt_common.h"
+
+#define EVT_BOOL_FMT(x) ((x) ? "true" : "false")
+
+#define EVT_VERBOSE ("verbose")
+#define EVT_DEVICE ("dev")
+#define EVT_TEST ("test")
+#define EVT_SCHED_LCORE ("slcore")
+#define EVT_PROD_LCORES ("plcores")
+#define EVT_WORK_LCORES ("wlcores")
+#define EVT_NB_FLOWS ("nb_flows")
+#define EVT_SOCKET_ID ("socket_id")
+#define EVT_POOL_SZ ("pool_sz")
+#define EVT_WKR_DEQ_DEP ("worker_deq_depth")
+#define EVT_NB_PKTS ("nb_pkts")
+#define EVT_NB_STAGES ("nb_stages")
+#define EVT_SCHED_TYPE_LIST ("stlist")
+#define EVT_FWD_LATENCY ("fwd_latency")
+#define EVT_QUEUE_PRIORITY ("queue_priority")
+#define EVT_HELP ("help")
+
+struct evt_options {
+#define EVT_TEST_NAME_MAX_LEN 32
+ char test_name[EVT_TEST_NAME_MAX_LEN];
+ bool plcores[RTE_MAX_LCORE];
+ bool wlcores[RTE_MAX_LCORE];
+ uint8_t sched_type_list[EVT_MAX_STAGES];
+ int slcore;
+ uint32_t nb_flows;
+ int socket_id;
+ int pool_sz;
+ int nb_stages;
+ int verbose_level;
+ uint64_t nb_pkts;
+ uint16_t wkr_deq_dep;
+ uint8_t dev_id;
+ uint32_t fwd_latency:1;
+ uint32_t q_priority:1;
+};
+
+void evt_options_default(struct evt_options *opt);
+int evt_options_parse(struct evt_options *opt, int argc, char **argv);
+void evt_options_dump(struct evt_options *opt);
+
+/* options check helpers */
+static inline bool
+evt_lcores_has_overlap(bool lcores[], int lcore)
+{
+ if (lcores[lcore] == true) {
+ evt_err("lcore overlaps at %d", lcore);
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool
+evt_lcores_has_overlap_multi(bool lcoresx[], bool lcoresy[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (lcoresx[i] && lcoresy[i]) {
+ evt_err("lcores overlaps at %d", i);
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool
+evt_has_active_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ return true;
+ return false;
+}
+
+static inline int
+evt_nr_active_lcores(bool lcores[])
+{
+ int i;
+ int c = 0;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ c++;
+ return c;
+}
+
+static inline int
+evt_get_first_active_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ return i;
+ return -1;
+}
+
+static inline bool
+evt_has_disabled_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if ((lcores[i] == true) && !(rte_lcore_is_enabled(i)))
+ return true;
+ return false;
+}
+
+static inline bool
+evt_has_invalid_stage(struct evt_options *opt)
+{
+ if (!opt->nb_stages) {
+ evt_err("need at least one stage, check --stlist");
+ return true;
+ }
+ if (opt->nb_stages > EVT_MAX_STAGES) {
+ evt_err("requested number of stages exceeds EVT_MAX_STAGES=%d",
+ EVT_MAX_STAGES);
+ return true;
+ }
+ return false;
+}
+
+static inline bool
+evt_has_invalid_sched_type(struct evt_options *opt)
+{
+ int i;
+
+ for (i = 0; i < opt->nb_stages; i++) {
+ if (opt->sched_type_list[i] > RTE_SCHED_TYPE_PARALLEL) {
+ evt_err("invalid sched_type %d at %d",
+ opt->sched_type_list[i], i);
+ return true;
+ }
+ }
+ return false;
+}
+
+/* option dump helpers */
+static inline void
+evt_dump_worker_lcores(struct evt_options *opt)
+{
+ int c;
+
+ evt_dump_begin("worker lcores");
+ for (c = 0; c < RTE_MAX_LCORE; c++) {
+ if (opt->wlcores[c])
+ printf("%d ", c);
+ }
+ evt_dump_end;
+}
+
+static inline void
+evt_dump_producer_lcores(struct evt_options *opt)
+{
+ int c;
+
+ evt_dump_begin("producer lcores");
+ for (c = 0; c < RTE_MAX_LCORE; c++) {
+ if (opt->plcores[c])
+ printf("%d ", c);
+ }
+ evt_dump_end;
+}
+
+static inline void
+evt_dump_nb_flows(struct evt_options *opt)
+{
+ evt_dump("nb_flows", "%d", opt->nb_flows);
+}
+
+static inline void
+evt_dump_scheduler_lcore(struct evt_options *opt)
+{
+ evt_dump("scheduler lcore", "%d", opt->slcore);
+}
+
+static inline void
+evt_dump_worker_dequeue_depth(struct evt_options *opt)
+{
+ evt_dump("worker deq depth", "%d", opt->wkr_deq_dep);
+}
+
+static inline void
+evt_dump_nb_stages(struct evt_options *opt)
+{
+ evt_dump("nb_stages", "%d", opt->nb_stages);
+}
+
+static inline void
+evt_dump_fwd_latency(struct evt_options *opt)
+{
+ evt_dump("fwd_latency", "%s", EVT_BOOL_FMT(opt->fwd_latency));
+}
+
+static inline void
+evt_dump_queue_priority(struct evt_options *opt)
+{
+ evt_dump("queue_priority", "%s", EVT_BOOL_FMT(opt->q_priority));
+}
+
+static inline const char*
+evt_sched_type_2_str(uint8_t sched_type)
+{
+
+ if (sched_type == RTE_SCHED_TYPE_ORDERED)
+ return "O";
+ else if (sched_type == RTE_SCHED_TYPE_ATOMIC)
+ return "A";
+ else if (sched_type == RTE_SCHED_TYPE_PARALLEL)
+ return "P";
+ else
+ return "I";
+}
+
+static inline void
+evt_dump_sched_type_list(struct evt_options *opt)
+{
+ int i;
+
+ evt_dump_begin("sched_type_list");
+ for (i = 0; i < opt->nb_stages; i++)
+ printf("%s ", evt_sched_type_2_str(opt->sched_type_list[i]));
+
+ evt_dump_end;
+}
+
+#endif /* _EVT_OPTIONS_ */
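
A test's opt_check callback typically chains several of these helpers; a minimal sketch, assuming <errno.h> is included and with a hypothetical test name:

	static int
	order_opt_check(struct evt_options *opt)
	{
		if (evt_lcores_has_overlap_multi(opt->plcores, opt->wlcores))
			return -EINVAL;
		if (evt_has_disabled_lcore(opt->plcores) ||
				evt_has_disabled_lcore(opt->wlcores))
			return -EINVAL;
		if (evt_has_invalid_stage(opt) ||
				evt_has_invalid_sched_type(opt))
			return -EINVAL;
		return 0;
	}
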
diff --git a/app/test-eventdev/evt_test.c b/app/test-eventdev/evt_test.c
new file mode 100644
index 00000000..3a432233
--- /dev/null
+++ b/app/test-eventdev/evt_test.c
@@ -0,0 +1,70 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include "evt_test.h"
+
+static STAILQ_HEAD(, evt_test_entry) head = STAILQ_HEAD_INITIALIZER(head);
+
+void
+evt_test_register(struct evt_test_entry *entry)
+{
+ STAILQ_INSERT_TAIL(&head, entry, next);
+}
+
+struct evt_test*
+evt_test_get(const char *name)
+{
+ struct evt_test_entry *entry;
+
+ if (!name)
+ return NULL;
+
+ STAILQ_FOREACH(entry, &head, next)
+ if (!strncmp(entry->test.name, name, strlen(name)))
+ return &entry->test;
+
+ return NULL;
+}
+
+void
+evt_test_dump_names(void)
+{
+ struct evt_test_entry *entry;
+
+ STAILQ_FOREACH(entry, &head, next)
+ if (entry->test.name)
+ printf("\t %s\n", entry->test.name);
+}
diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
new file mode 100644
index 00000000..17bdd165
--- /dev/null
+++ b/app/test-eventdev/evt_test.h
@@ -0,0 +1,125 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EVT_TEST_
+#define _EVT_TEST_
+
+#include <string.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_eal.h>
+
+enum evt_test_result {
+ EVT_TEST_SUCCESS,
+ EVT_TEST_FAILED,
+ EVT_TEST_UNSUPPORTED,
+};
+
+struct evt_test;
+struct evt_options;
+
+typedef bool (*evt_test_capability_check_t)(struct evt_options *opt);
+typedef int (*evt_test_options_check_t)(struct evt_options *opt);
+typedef void (*evt_test_options_dump_t)(struct evt_options *opt);
+typedef int (*evt_test_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_mempool_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_ethdev_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_eventdev_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_launch_lcores_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_result_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_eventdev_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_ethdev_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_mempool_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+
+struct evt_test_ops {
+ evt_test_capability_check_t cap_check;
+ evt_test_options_check_t opt_check;
+ evt_test_options_dump_t opt_dump;
+ evt_test_setup_t test_setup;
+ evt_test_mempool_setup_t mempool_setup;
+ evt_test_ethdev_setup_t ethdev_setup;
+ evt_test_eventdev_setup_t eventdev_setup;
+ evt_test_launch_lcores_t launch_lcores;
+ evt_test_result_t test_result;
+ evt_test_eventdev_destroy_t eventdev_destroy;
+ evt_test_ethdev_destroy_t ethdev_destroy;
+ evt_test_mempool_destroy_t mempool_destroy;
+ evt_test_destroy_t test_destroy;
+};
+
+struct evt_test {
+ const char *name;
+ void *test_priv;
+ struct evt_test_ops ops;
+};
+
+struct evt_test_entry {
+ struct evt_test test;
+
+ STAILQ_ENTRY(evt_test_entry) next;
+};
+
+void evt_test_register(struct evt_test_entry *test);
+void evt_test_dump_names(void);
+
+#define EVT_TEST_REGISTER(nm) \
+static struct evt_test_entry _evt_test_entry_ ##nm; \
+RTE_INIT(evt_test_ ##nm); \
+static void evt_test_ ##nm(void) \
+{ \
+ _evt_test_entry_ ##nm.test.name = RTE_STR(nm);\
+ memcpy(&_evt_test_entry_ ##nm.test.ops, &nm, \
+ sizeof(struct evt_test_ops)); \
+ evt_test_register(&_evt_test_entry_ ##nm); \
+}
+
+struct evt_test *evt_test_get(const char *name);
+
+static inline void *
+evt_test_priv(struct evt_test *test)
+{
+ return test->test_priv;
+}
+
+#endif /* _EVT_TEST_ */
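
A test plugs into the framework by filling an evt_test_ops table and handing its variable to EVT_TEST_REGISTER; the variable name becomes the --test name via RTE_STR(nm). A sketch with hypothetical handler names (ops left unset stay NULL and are skipped by evt_main.c):

	static const struct evt_test_ops order_queue = {
		.cap_check = order_queue_cap_check,
		.opt_check = order_queue_opt_check,
		.test_setup = order_queue_test_setup,
		.eventdev_setup = order_queue_eventdev_setup,
		.launch_lcores = order_queue_launch_lcores,
		.test_result = order_queue_test_result,
		.test_destroy = order_queue_test_destroy,
	};
	EVT_TEST_REGISTER(order_queue);
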
diff --git a/app/test-eventdev/parser.c b/app/test-eventdev/parser.c
new file mode 100644
index 00000000..9de41bf4
--- /dev/null
+++ b/app/test-eventdev/parser.c
@@ -0,0 +1,388 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Cavium, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include <stdbool.h>
+
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
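+
+/*
+ * Examples, as implied by the checks above (matching is all-lower or
+ * all-upper case only):
+ *   "yes", "YES", "on", "ON"   -> 1
+ *   "no", "NO", "off", "OFF"   -> 0
+ *   anything else, or trailing text after the token -> -EINVAL
+ */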
+
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
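+
+/*
+ * Examples, as implied by the suffix handling above; every suffix is a
+ * power-of-two multiplier and 'k'/'K' are equivalent:
+ *   "512" -> 512
+ *   "4K"  -> 4 * 1024
+ *   "2M"  -> 2 * 1024 * 1024
+ *   "1G"  -> 1024 * 1024 * 1024
+ *   "1T"  -> 1024ULL * 1024 * 1024 * 1024
+ */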
+
+int
+parser_read_int32(int32_t *value, const char *p)
+{
+ char *next;
+ int32_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtol(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoul(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if ((string == NULL) ||
+ (tokens == NULL) ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if ((i == *n_tokens) &&
+ (strtok_r(string, PARSE_DELIMITER, &string) != NULL))
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
+
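+/*
+ * Convert a hex string such as "DEADBEEF" into bytes {0xde, 0xad, 0xbe, 0xef}.
+ * On entry *size holds the capacity of dst in bytes; on success it is updated
+ * to the number of bytes written. Note that the (len & 3) check below also
+ * requires the input length to be a multiple of four hex digits.
+ */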
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
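+/*
+ * Parse a core list such as "0,2-4,7" into the lcores[] boolean array.
+ * Ranges use '-', entries are separated by ',', and listing the same core
+ * twice is rejected with -E2BIG by the duplicate check below.
+ */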
+int
+parse_lcores_list(bool lcores[], const char *corelist)
+{
+ int i, idx = 0;
+ int min, max;
+ char *end = NULL;
+
+ if (corelist == NULL)
+ return -1;
+ while (isblank(*corelist))
+ corelist++;
+ i = strlen(corelist);
+ while ((i > 0) && isblank(corelist[i - 1]))
+ i--;
+
+ /* Get list of lcores */
+ min = RTE_MAX_LCORE;
+ do {
+ while (isblank(*corelist))
+ corelist++;
+ if (*corelist == '\0')
+ return -1;
+ idx = strtoul(corelist, &end, 10);
+
+ if (end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = min; idx <= max; idx++) {
+ if (lcores[idx] == 1)
+ return -E2BIG;
+ lcores[idx] = 1;
+ }
+
+ min = RTE_MAX_LCORE;
+ } else
+ return -1;
+ corelist = end + 1;
+ } while (*end != '\0');
+
+ return 0;
+}
diff --git a/app/test-eventdev/parser.h b/app/test-eventdev/parser.h
new file mode 100644
index 00000000..75a5a3b4
--- /dev/null
+++ b/app/test-eventdev/parser.h
@@ -0,0 +1,79 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PARSER_H__
+#define __INCLUDE_PARSER_H__
+
+#include <stdint.h>
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int parser_read_arg_bool(const char *p);
+
+int parser_read_uint64(uint64_t *value, const char *p);
+int parser_read_uint32(uint32_t *value, const char *p);
+int parser_read_uint16(uint16_t *value, const char *p);
+int parser_read_uint8(uint8_t *value, const char *p);
+
+int parser_read_uint64_hex(uint64_t *value, const char *p);
+int parser_read_uint32_hex(uint32_t *value, const char *p);
+int parser_read_uint16_hex(uint16_t *value, const char *p);
+int parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int parser_read_int32(int32_t *value, const char *p);
+
+int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
+
+int parse_lcores_list(bool lcores[], const char *corelist);
+#endif /* __INCLUDE_PARSER_H__ */
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
new file mode 100644
index 00000000..7e6c67d4
--- /dev/null
+++ b/app/test-eventdev/test_order_atq.c
@@ -0,0 +1,232 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include "test_order_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
+static inline __attribute__((always_inline)) void
+order_atq_process_stage_0(struct rte_event *const ev)
+{
+ ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
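+
+/*
+ * Test pipeline sketch: the producer injects ORDERED events with
+ * sub_event_type 0 on the single all-types queue; workers forward them as
+ * ATOMIC stage-1 events on the same queue, where order_process_stage_1()
+ * verifies the per-flow sequence numbers.
+ */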
+
+static int
+order_atq_worker(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->err == false) {
+ uint16_t event = rte_event_dequeue_burst(dev_id, port,
+ &ev, 1, 0);
+ if (!event) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ if (ev.sub_event_type == 0) { /* stage 0 from producer */
+ order_atq_process_stage_0(&ev);
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
+ != 1)
+ rte_pause();
+ } else if (ev.sub_event_type == 1) { /* stage 1 */
+ order_process_stage_1(t, &ev, nb_flows,
+ expected_flow_seq, outstand_pkts);
+ } else {
+ order_process_stage_invalid(t, &ev);
+ }
+ }
+ return 0;
+}
+
+static int
+order_atq_worker_burst(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i;
+
+ while (t->err == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
+ BURST_SIZE, 0);
+
+ if (nb_rx == 0) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+			if (ev[i].sub_event_type == 0) { /* stage 0 */
+ order_atq_process_stage_0(&ev[i]);
+ } else if (ev[i].sub_event_type == 1) { /* stage 1 */
+ order_process_stage_1(t, &ev[i], nb_flows,
+ expected_flow_seq, outstand_pkts);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ order_process_stage_invalid(t, &ev[i]);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+
+ if (burst)
+ return order_atq_worker_burst(arg);
+ else
+ return order_atq_worker(arg);
+}
+
+static int
+order_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return order_launch_lcores(test, opt, worker_wrapper);
+}
+
+#define NB_QUEUES 1
+static int
+order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+
+ const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
+ /* number of active worker cores + 1 producer */
+ const uint8_t nb_ports = nb_workers + 1;
+
+ const struct rte_event_dev_config config = {
+		.nb_event_queues = NB_QUEUES, /* one all types queue */
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q0 all types queue configuration */
+ struct rte_event_queue_conf q0_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 0, &q0_conf);
+ if (ret) {
+ evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* setup one port per worker, linking to all queues */
+ ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+order_atq_opt_dump(struct evt_options *opt)
+{
+ order_opt_dump(opt);
+ evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
+}
+
+static bool
+order_atq_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
+ order_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ NB_QUEUES, dev_info.max_event_queues,
+ order_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ if (!evt_has_all_types_queue(opt->dev_id))
+ return false;
+
+ return true;
+}
+
+static const struct evt_test_ops order_atq = {
+ .cap_check = order_atq_capability_check,
+ .opt_check = order_opt_check,
+ .opt_dump = order_atq_opt_dump,
+ .test_setup = order_test_setup,
+ .mempool_setup = order_mempool_setup,
+ .eventdev_setup = order_atq_eventdev_setup,
+ .launch_lcores = order_atq_launch_lcores,
+ .eventdev_destroy = order_eventdev_destroy,
+ .mempool_destroy = order_mempool_destroy,
+ .test_result = order_test_result,
+ .test_destroy = order_test_destroy,
+};
+
+EVT_TEST_REGISTER(order_atq);
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
new file mode 100644
index 00000000..80e14c08
--- /dev/null
+++ b/app/test-eventdev/test_order_common.c
@@ -0,0 +1,380 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_order_common.h"
+
+int
+order_test_result(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ return t->result;
+}
+
+static inline int
+order_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_order *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ uint32_t *producer_flow_seq = t->producer_flow_seq;
+ const uint32_t nb_flows = t->nb_flows;
+ uint64_t count = 0;
+ struct rte_mbuf *m;
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+ __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 0; /* stage 0 */
+
+ while (count < nb_pkts && t->err == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ const uint32_t flow = (uintptr_t)m % nb_flows;
+ /* Maintain seq number per flow */
+ m->seqn = producer_flow_seq[flow]++;
+
+ ev.flow_id = flow;
+ ev.mbuf = m;
+
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+ if (t->err)
+ break;
+ rte_pause();
+ }
+
+ count++;
+ }
+ return 0;
+}
+
+int
+order_opt_check(struct evt_options *opt)
+{
+ /* 1 producer + N workers + 1 master */
+ if (rte_lcore_count() < 3) {
+ evt_err("test need minimum 3 lcores");
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+ evt_err("worker lcores overlaps with master lcore");
+ return -1;
+ }
+
+ if (evt_nr_active_lcores(opt->plcores) == 0) {
+ evt_err("missing the producer lcore");
+ return -1;
+ }
+
+ if (evt_nr_active_lcores(opt->plcores) != 1) {
+ evt_err("only one producer lcore must be selected");
+ return -1;
+ }
+
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ if (plcore < 0) {
+ evt_err("failed to find active producer");
+ return plcore;
+ }
+
+ if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
+ evt_err("worker lcores overlaps producer lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+ evt_err("one or more workers lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("minimum one worker is required");
+ return -1;
+ }
+
+ /* Validate producer lcore */
+ if (plcore == (int)rte_get_master_lcore()) {
+ evt_err("producer lcore and master lcore should be different");
+ return -1;
+ }
+ if (!rte_lcore_is_enabled(plcore)) {
+ evt_err("producer lcore is not enabled");
+ return -1;
+ }
+
+ /* Fixups */
+ if (opt->nb_pkts == 0)
+ opt->nb_pkts = INT64_MAX;
+
+ return 0;
+}
+
+int
+order_test_setup(struct evt_test *test, struct evt_options *opt)
+{
+ void *test_order;
+
+ test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (test_order == NULL) {
+ evt_err("failed to allocate test_order memory");
+ goto nomem;
+ }
+ test->test_priv = test_order;
+
+ struct test_order *t = evt_test_priv(test);
+
+ t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
+ sizeof(*t->producer_flow_seq) * opt->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+
+ if (t->producer_flow_seq == NULL) {
+ evt_err("failed to allocate t->producer_flow_seq memory");
+ goto prod_nomem;
+ }
+
+ t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
+ sizeof(*t->expected_flow_seq) * opt->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+
+ if (t->expected_flow_seq == NULL) {
+ evt_err("failed to allocate t->expected_flow_seq memory");
+ goto exp_nomem;
+ }
+ rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
+ t->err = false;
+ t->nb_pkts = opt->nb_pkts;
+ t->nb_flows = opt->nb_flows;
+ t->result = EVT_TEST_FAILED;
+ t->opt = opt;
+ return 0;
+
+exp_nomem:
+ rte_free(t->producer_flow_seq);
+prod_nomem:
+ rte_free(test->test_priv);
+nomem:
+ return -ENOMEM;
+}
+
+void
+order_test_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ rte_free(t->expected_flow_seq);
+ rte_free(t->producer_flow_seq);
+ rte_free(test->test_priv);
+}
+
+int
+order_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_order *t = evt_test_priv(test);
+
+ t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
+ 256 /* Cache */, 0,
+ 512, /* Use very small mbufs */
+ opt->socket_id);
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+void
+order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(test);
+
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+void
+order_opt_dump(struct evt_options *opt)
+{
+ evt_dump_producer_lcores(opt);
+ evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
+}
+
+int
+order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_order *t = evt_test_priv(test);
+
+ int wkr_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
+ lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ wkr_idx++;
+ }
+
+ /* launch producer */
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
+ if (ret) {
+ evt_err("failed to launch order_producer %d", plcore);
+ return ret;
+ }
+
+ uint64_t cycles = rte_get_timer_cycles();
+ int64_t old_remaining = -1;
+
+ while (t->err == false) {
+
+ rte_event_schedule(opt->dev_id);
+
+ uint64_t new_cycles = rte_get_timer_cycles();
+ int64_t remaining = rte_atomic64_read(&t->outstand_pkts);
+
+ if (remaining <= 0) {
+ t->result = EVT_TEST_SUCCESS;
+ break;
+ }
+
+ if (new_cycles - cycles > rte_get_timer_hz() * 1) {
+ printf(CLGRN"\r%"PRId64""CLNRM, remaining);
+ fflush(stdout);
+ if (old_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No schedules for seconds, deadlock");
+ t->err = true;
+ rte_smp_wmb();
+ break;
+ }
+ old_remaining = remaining;
+ cycles = new_cycles;
+ }
+ }
+ printf("\r");
+
+ return 0;
+}
+
+int
+order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t nb_workers, uint8_t nb_queues)
+{
+ int ret;
+ uint8_t port;
+ struct test_order *t = evt_test_priv(test);
+
+ /* port configuration */
+ const struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < nb_workers; port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
+	/* port for the producer; no queue links are needed since it only
+	 * enqueues RTE_EVENT_OP_NEW events
+	 */
+ const struct rte_event_port_conf prod_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 32,
+ .new_event_threshold = 1200,
+ };
+ struct prod_data *p = &t->prod;
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port; /* last port */
+ p->queue_id = 0;
+ p->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
+ if (ret) {
+ evt_err("failed to setup producer port %d", port);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
new file mode 100644
index 00000000..57bc76e0
--- /dev/null
+++ b/app/test-eventdev/test_order_common.h
@@ -0,0 +1,153 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TEST_ORDER_COMMON_
+#define _TEST_ORDER_COMMON_
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+#define BURST_SIZE 16
+
+struct test_order;
+
+struct worker_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ struct test_order *t;
+};
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ uint8_t queue_id;
+ struct test_order *t;
+};
+
+struct test_order {
+	/* Don't change the offset of "err". The signal handler uses this
+	 * memory to terminate all lcore workers.
+	 */
+ int err;
+	/*
+	 * The atomic_* operations are expensive, but since this is a
+	 * functional test we use them to keep the code simple.
+	 */
+ rte_atomic64_t outstand_pkts;
+ enum evt_test_result result;
+ uint32_t nb_flows;
+ uint64_t nb_pkts;
+ struct rte_mempool *pool;
+ struct prod_data prod;
+ struct worker_data worker[EVT_MAX_PORTS];
+ uint32_t *producer_flow_seq;
+ uint32_t *expected_flow_seq;
+ struct evt_options *opt;
+} __rte_cache_aligned;
+
+static inline int
+order_nb_event_ports(struct evt_options *opt)
+{
+ return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
+}
+
+static inline __attribute__((always_inline)) void
+order_process_stage_1(struct test_order *const t,
+ struct rte_event *const ev, const uint32_t nb_flows,
+ uint32_t *const expected_flow_seq,
+ rte_atomic64_t *const outstand_pkts)
+{
+ const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
+ /* compare the seqn against expected value */
+ if (ev->mbuf->seqn != expected_flow_seq[flow]) {
+ evt_err("flow=%x seqn mismatch got=%x expected=%x",
+ flow, ev->mbuf->seqn, expected_flow_seq[flow]);
+ t->err = true;
+ rte_smp_wmb();
+ }
+	/*
+	 * Events from an atomic flow of an event queue can be scheduled only to
+	 * a single port at a time. The port is guaranteed to have exclusive
+	 * (atomic) access to the given atomic flow, so expected_flow_seq does
+	 * not need to be updated in a critical section.
+	 */
+ expected_flow_seq[flow]++;
+ rte_pktmbuf_free(ev->mbuf);
+ rte_atomic64_sub(outstand_pkts, 1);
+}
+
+static inline __attribute__((always_inline)) void
+order_process_stage_invalid(struct test_order *const t,
+ struct rte_event *const ev)
+{
+ evt_err("invalid queue %d", ev->queue_id);
+ t->err = true;
+ rte_smp_wmb();
+}
+
+#define ORDER_WORKER_INIT\
+ struct worker_data *w = arg;\
+ struct test_order *t = w->t;\
+ struct evt_options *opt = t->opt;\
+ const uint8_t dev_id = w->dev_id;\
+ const uint8_t port = w->port_id;\
+ const uint32_t nb_flows = t->nb_flows;\
+ uint32_t *expected_flow_seq = t->expected_flow_seq;\
+ rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
+ if (opt->verbose_level > 1)\
+ printf("%s(): lcore %d dev_id %d port=%d\n",\
+ __func__, rte_lcore_id(), dev_id, port)
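+
+/*
+ * ORDER_WORKER_INIT deliberately omits a trailing semicolon so worker
+ * functions can invoke it as "ORDER_WORKER_INIT;" at the top of their body,
+ * where the local variables it declares remain in scope for the rest of the
+ * function.
+ */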
+
+int order_test_result(struct evt_test *test, struct evt_options *opt);
+int order_opt_check(struct evt_options *opt);
+int order_test_setup(struct evt_test *test, struct evt_options *opt);
+int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t nb_workers, uint8_t nb_queues);
+void order_test_destroy(struct evt_test *test, struct evt_options *opt);
+void order_opt_dump(struct evt_options *opt);
+void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+
+#endif /* _TEST_ORDER_COMMON_ */
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
new file mode 100644
index 00000000..beadd9c3
--- /dev/null
+++ b/app/test-eventdev/test_order_queue.c
@@ -0,0 +1,242 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include "test_order_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
+static inline __attribute__((always_inline)) void
+order_queue_process_stage_0(struct rte_event *const ev)
+{
+ ev->queue_id = 1; /* q1 atomic queue */
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
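+
+/*
+ * Test pipeline sketch: the producer injects ORDERED events on q0; workers
+ * forward each stage-0 event to the atomic queue q1, where
+ * order_process_stage_1() verifies the per-flow sequence numbers.
+ */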
+
+static int
+order_queue_worker(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->err == false) {
+ uint16_t event = rte_event_dequeue_burst(dev_id, port,
+ &ev, 1, 0);
+ if (!event) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ if (ev.queue_id == 0) { /* from ordered queue */
+ order_queue_process_stage_0(&ev);
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
+ != 1)
+ rte_pause();
+ } else if (ev.queue_id == 1) { /* from atomic queue */
+ order_process_stage_1(t, &ev, nb_flows,
+ expected_flow_seq, outstand_pkts);
+ } else {
+ order_process_stage_invalid(t, &ev);
+ }
+ }
+ return 0;
+}
+
+static int
+order_queue_worker_burst(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i;
+
+ while (t->err == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
+ BURST_SIZE, 0);
+
+ if (nb_rx == 0) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (ev[i].queue_id == 0) { /* from ordered queue */
+ order_queue_process_stage_0(&ev[i]);
+			} else if (ev[i].queue_id == 1) { /* from atomic queue */
+ order_process_stage_1(t, &ev[i], nb_flows,
+ expected_flow_seq, outstand_pkts);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ order_process_stage_invalid(t, &ev[i]);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+
+ if (burst)
+ return order_queue_worker_burst(arg);
+ else
+ return order_queue_worker(arg);
+}
+
+static int
+order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return order_launch_lcores(test, opt, worker_wrapper);
+}
+
+#define NB_QUEUES 2
+static int
+order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+
+ const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
+ /* number of active worker cores + 1 producer */
+ const uint8_t nb_ports = nb_workers + 1;
+
+ const struct rte_event_dev_config config = {
+		.nb_event_queues = NB_QUEUES, /* q0 ordered, q1 atomic */
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q0 (ordered queue) configuration */
+ struct rte_event_queue_conf q0_ordered_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
+ if (ret) {
+ evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q1 (atomic queue) configuration */
+ struct rte_event_queue_conf q1_atomic_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
+ if (ret) {
+ evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* setup one port per worker, linking to all queues */
+ ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+order_queue_opt_dump(struct evt_options *opt)
+{
+ order_opt_dump(opt);
+ evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
+}
+
+static bool
+order_queue_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
+ order_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ NB_QUEUES, dev_info.max_event_queues,
+ order_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops order_queue = {
+ .cap_check = order_queue_capability_check,
+ .opt_check = order_opt_check,
+ .opt_dump = order_queue_opt_dump,
+ .test_setup = order_test_setup,
+ .mempool_setup = order_mempool_setup,
+ .eventdev_setup = order_queue_eventdev_setup,
+ .launch_lcores = order_queue_launch_lcores,
+ .eventdev_destroy = order_eventdev_destroy,
+ .mempool_destroy = order_mempool_destroy,
+ .test_result = order_test_result,
+ .test_destroy = order_test_destroy,
+};
+
+EVT_TEST_REGISTER(order_queue);
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
new file mode 100644
index 00000000..9c3efa3a
--- /dev/null
+++ b/app/test-eventdev/test_perf_atq.c
@@ -0,0 +1,277 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_perf_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
+static inline int
+atq_nb_event_queues(struct evt_options *opt)
+{
+ /* nb_queues = number of producers */
+ return evt_nr_active_lcores(opt->plcores);
+}
+
+static inline __attribute__((always_inline)) void
+atq_mark_fwd_latency(struct rte_event *const ev)
+{
+ if (unlikely(ev->sub_event_type == 0)) {
+ struct perf_elt *const m = ev->event_ptr;
+
+ m->timestamp = rte_get_timer_cycles();
+ }
+}
+
+static inline __attribute__((always_inline)) void
+atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
+ const uint8_t nb_stages)
+{
+ ev->sub_event_type++;
+ ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
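+
+/*
+ * In this test the pipeline stage is encoded in sub_event_type rather than
+ * in the queue id: atq_fwd_event() above advances the stage and picks the
+ * next scheduling type from sched_type_list, so a single all-types queue
+ * carries all nb_stages stages.
+ */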
+
+static int
+perf_atq_worker(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (enable_fwd_latency)
+ rte_prefetch0(ev.event_ptr);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ if (enable_fwd_latency)
+			/* first stage in pipeline, mark timestamp
+			 * to compute fwd latency
+			 */
+ atq_mark_fwd_latency(&ev);
+
+ /* last stage in pipeline */
+ if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(pool,
+ &ev, w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool, &ev, w,
+ bufs, sz, cnt);
+ } else {
+ atq_fwd_event(&ev, sched_type_list, nb_stages);
+ while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
+ rte_pause();
+ }
+ }
+ return 0;
+}
+
+static int
+perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ uint16_t i;
+	/* +1 so the ev[i + 1] prefetch below never reads past the array */
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (enable_fwd_latency) {
+ rte_prefetch0(ev[i+1].event_ptr);
+				/* first stage in pipeline,
+				 * mark timestamp to compute fwd latency
+				 */
+ atq_mark_fwd_latency(&ev[i]);
+ }
+ /* last stage in pipeline */
+ if (unlikely((ev[i].sub_event_type % nb_stages)
+ == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(
+ pool, &ev[i], w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev[i], w, bufs, sz, cnt);
+
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ atq_fwd_event(&ev[i], sched_type_list,
+ nb_stages);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const int fwd_latency = opt->fwd_latency;
+
+ /* allow compiler to optimize */
+ if (!burst && !fwd_latency)
+ return perf_atq_worker(arg, 0);
+ else if (!burst && fwd_latency)
+ return perf_atq_worker(arg, 1);
+ else if (burst && !fwd_latency)
+ return perf_atq_worker_burst(arg, 0);
+ else if (burst && fwd_latency)
+ return perf_atq_worker_burst(arg, 1);
+
+ rte_panic("invalid worker\n");
+}
+
+static int
+perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return perf_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+ uint8_t queue;
+
+ const struct rte_event_dev_config config = {
+ .nb_event_queues = atq_nb_event_queues(opt),
+ .nb_event_ports = perf_nb_event_ports(opt),
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < atq_nb_event_queues(opt); queue++) {
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
+ atq_nb_event_queues(opt));
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+perf_atq_opt_dump(struct evt_options *opt)
+{
+ perf_opt_dump(opt, atq_nb_event_queues(opt));
+}
+
+static int
+perf_atq_opt_check(struct evt_options *opt)
+{
+ return perf_opt_check(opt, atq_nb_event_queues(opt));
+}
+
+static bool
+perf_atq_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
+ dev_info.max_event_ports < perf_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ atq_nb_event_queues(opt), dev_info.max_event_queues,
+ perf_nb_event_ports(opt), dev_info.max_event_ports);
+		return false;
+	}
+ if (!evt_has_all_types_queue(opt->dev_id))
+ return false;
+
+ return true;
+}
+
+static const struct evt_test_ops perf_atq = {
+ .cap_check = perf_atq_capability_check,
+ .opt_check = perf_atq_opt_check,
+ .opt_dump = perf_atq_opt_dump,
+ .test_setup = perf_test_setup,
+ .mempool_setup = perf_mempool_setup,
+ .eventdev_setup = perf_atq_eventdev_setup,
+ .launch_lcores = perf_atq_launch_lcores,
+ .eventdev_destroy = perf_eventdev_destroy,
+ .mempool_destroy = perf_mempool_destroy,
+ .test_result = perf_test_result,
+ .test_destroy = perf_test_destroy,
+};
+
+EVT_TEST_REGISTER(perf_atq);
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
new file mode 100644
index 00000000..7b092994
--- /dev/null
+++ b/app/test-eventdev/test_perf_common.c
@@ -0,0 +1,497 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_perf_common.h"
+
+int
+perf_test_result(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_perf *t = evt_test_priv(test);
+
+ return t->result;
+}
+
+static inline int
+perf_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ const uint32_t nb_flows = t->nb_flows;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ struct perf_elt *m;
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+ rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = t->opt->sched_type_list[0];
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 0; /* stage 0 */
+
+ while (count < nb_pkts && t->done == false) {
+ if (rte_mempool_get(pool, (void **)&m) < 0)
+ continue;
+
+ ev.flow_id = flow_counter++ % nb_flows;
+ ev.event_ptr = m;
+ m->timestamp = rte_get_timer_cycles();
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+ if (t->done)
+ break;
+ rte_pause();
+ m->timestamp = rte_get_timer_cycles();
+ }
+ count++;
+ }
+
+ return 0;
+}
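+
+/*
+ * Note: perf_producer() above refreshes m->timestamp inside its enqueue
+ * retry loop, so time spent stalled on enqueue back-pressure is not counted
+ * as forward latency by the last pipeline stage.
+ */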
+
+static inline int
+scheduler(void *arg)
+{
+ struct test_perf *t = arg;
+ const uint8_t dev_id = t->opt->dev_id;
+
+ while (t->done == false)
+ rte_event_schedule(dev_id);
+
+ return 0;
+}
+
+static inline uint64_t
+processed_pkts(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ rte_smp_rmb();
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+
+ return total;
+}
+
+static inline uint64_t
+total_latency(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ rte_smp_rmb();
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].latency;
+
+ return total;
+}
+
+
+int
+perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_perf *t = evt_test_priv(test);
+
+ int port_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker,
+ &t->worker[port_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
+ /* launch producers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->plcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(perf_producer, &t->prod[port_idx],
+ lcore_id);
+ if (ret) {
+ evt_err("failed to launch perf_producer %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
+ /* launch scheduler */
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ ret = rte_eal_remote_launch(scheduler, t, opt->slcore);
+ if (ret) {
+ evt_err("failed to launch sched %d", opt->slcore);
+ return ret;
+ }
+ }
+
+ const uint64_t total_pkts = opt->nb_pkts *
+ evt_nr_active_lcores(opt->plcores);
+
+ uint64_t dead_lock_cycles = rte_get_timer_cycles();
+ int64_t dead_lock_remaining = total_pkts;
+ const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;
+
+ uint64_t perf_cycles = rte_get_timer_cycles();
+ int64_t perf_remaining = total_pkts;
+ const uint64_t perf_sample = rte_get_timer_hz();
+
+ static float total_mpps;
+ static uint64_t samples;
+
+ const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
+ int64_t remaining = t->outstand_pkts - processed_pkts(t);
+
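+	/*
+	 * Monitoring loop: about once per second (perf_sample) print the
+	 * instantaneous and running-average mpps, computed as the delta of
+	 * processed packets / 1e6; about every five seconds
+	 * (dead_lock_sample) declare a deadlock if no packet has been
+	 * processed since the previous check.
+	 */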
+ while (t->done == false) {
+ const uint64_t new_cycles = rte_get_timer_cycles();
+
+ if ((new_cycles - perf_cycles) > perf_sample) {
+ const uint64_t latency = total_latency(t);
+ const uint64_t pkts = processed_pkts(t);
+
+ remaining = t->outstand_pkts - pkts;
+			float mpps = (float)(perf_remaining - remaining) / 1000000;
+
+ perf_remaining = remaining;
+ perf_cycles = new_cycles;
+ total_mpps += mpps;
+ ++samples;
+ if (opt->fwd_latency && pkts > 0) {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
+ mpps, total_mpps/samples,
+ (float)(latency/pkts)/freq_mhz);
+ } else {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
+ mpps, total_mpps/samples);
+ }
+ fflush(stdout);
+
+ if (remaining <= 0) {
+ t->done = true;
+ t->result = EVT_TEST_SUCCESS;
+ rte_smp_wmb();
+ break;
+ }
+ }
+
+ if (new_cycles - dead_lock_cycles > dead_lock_sample) {
+ remaining = t->outstand_pkts - processed_pkts(t);
+ if (dead_lock_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No schedules for seconds, deadlock");
+ t->done = true;
+ rte_smp_wmb();
+ break;
+ }
+ dead_lock_remaining = remaining;
+ dead_lock_cycles = new_cycles;
+ }
+ }
+ printf("\n");
+ return 0;
+}
+
+int
+perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t stride, uint8_t nb_queues)
+{
+ struct test_perf *t = evt_test_priv(test);
+ uint8_t port, prod;
+ int ret = -1;
+
+ /* port configuration */
+ const struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
+ port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+ w->processed_pkts = 0;
+ w->latency = 0;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
+
+ /* port for producers, no links */
+ const struct rte_event_port_conf prod_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 32,
+ .new_event_threshold = 1200,
+ };
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port;
+ p->queue_id = prod * stride;
+ p->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+ prod++;
+ }
+
+ return ret;
+}
+
+int
+perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
+{
+ unsigned int lcores;
+ bool need_slcore = !evt_has_distributed_sched(opt->dev_id);
+
+	/* N producers + N workers + 1 scheduler (when the device lacks
+	 * distributed scheduling) + 1 master
+	 */
+ lcores = need_slcore ? 4 : 3;
+
+ if (rte_lcore_count() < lcores) {
+ evt_err("test need minimum %d lcores", lcores);
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+ evt_err("worker lcores overlaps with master lcore");
+ return -1;
+ }
+ if (need_slcore && evt_lcores_has_overlap(opt->wlcores, opt->slcore)) {
+ evt_err("worker lcores overlaps with scheduler lcore");
+ return -1;
+ }
+ if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
+ evt_err("worker lcores overlaps producer lcores");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+ evt_err("one or more workers lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("minimum one worker is required");
+ return -1;
+ }
+
+ /* Validate producer lcores */
+ if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) {
+ evt_err("producer lcores overlaps with master lcore");
+ return -1;
+ }
+ if (need_slcore && evt_lcores_has_overlap(opt->plcores, opt->slcore)) {
+ evt_err("producer lcores overlaps with scheduler lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->plcores)) {
+ evt_err("one or more producer lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->plcores)) {
+ evt_err("minimum one producer is required");
+ return -1;
+ }
+
+ /* Validate scheduler lcore */
+ if (!evt_has_distributed_sched(opt->dev_id) &&
+ opt->slcore == (int)rte_get_master_lcore()) {
+ evt_err("scheduler lcore and master lcore should be different");
+ return -1;
+ }
+ if (need_slcore && !rte_lcore_is_enabled(opt->slcore)) {
+ evt_err("scheduler lcore is not enabled");
+ return -1;
+ }
+
+ if (evt_has_invalid_stage(opt))
+ return -1;
+
+ if (evt_has_invalid_sched_type(opt))
+ return -1;
+
+ if (nb_queues > EVT_MAX_QUEUES) {
+ evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
+ return -1;
+ }
+ if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
+ evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
+ return -1;
+ }
+
+ /* Fixups */
+ if (opt->nb_stages == 1 && opt->fwd_latency) {
+ evt_info("fwd_latency is valid when nb_stages > 1, disabling");
+ opt->fwd_latency = 0;
+ }
+ if (opt->fwd_latency && !opt->q_priority) {
+ evt_info("enabled queue priority for latency measurement");
+ opt->q_priority = 1;
+ }
+ if (opt->nb_pkts == 0)
+		opt->nb_pkts = INT64_MAX / evt_nr_active_lcores(opt->plcores);
+
+ return 0;
+}
+
+void
+perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
+{
+ evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
+ evt_dump_producer_lcores(opt);
+ evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ if (!evt_has_distributed_sched(opt->dev_id))
+ evt_dump_scheduler_lcore(opt);
+ evt_dump_nb_stages(opt);
+ evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
+ evt_dump("nb_evdev_queues", "%d", nb_queues);
+ evt_dump_queue_priority(opt);
+ evt_dump_sched_type_list(opt);
+}
+
+void
+perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(test);
+
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+static inline void
+perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
+ void *obj, unsigned i __rte_unused)
+{
+ memset(obj, 0, mp->elt_size);
+}
+
+int
+perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_perf *t = evt_test_priv(test);
+
+ t->pool = rte_mempool_create(test->name, /* mempool name */
+ opt->pool_sz, /* number of elements */
+ sizeof(struct perf_elt), /* element size */
+ 512, /* cache size */
+ 0, NULL, NULL,
+ perf_elt_init, /* obj constructor */
+ NULL, opt->socket_id, 0); /* flags */
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
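
A minimal sketch of the element life cycle this pool supports, assuming the producer logic (defined elsewhere in this file, not in this hunk) stamps a perf_elt and injects it as an event; the worker side is what perf_process_last_stage() in test_perf_common.h undoes:

	struct perf_elt *m;

	/* producer side (sketch): draw an element and stamp it */
	if (rte_mempool_get(t->pool, (void **)&m) == 0) {
		m->timestamp = rte_get_timer_cycles();
		/* enqueue it as ev.event_ptr via rte_event_enqueue_burst() */
	}
	/* worker side: the last stage batches elements back into the pool
	 * with rte_mempool_put_bulk(), see perf_process_last_stage(). */
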
+
+void
+perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_perf *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+int
+perf_test_setup(struct evt_test *test, struct evt_options *opt)
+{
+ void *test_perf;
+
+ test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (test_perf == NULL) {
+ evt_err("failed to allocate test_perf memory");
+ goto nomem;
+ }
+ test->test_priv = test_perf;
+
+ struct test_perf *t = evt_test_priv(test);
+
+ t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
+ t->nb_workers = evt_nr_active_lcores(opt->wlcores);
+ t->done = false;
+ t->nb_pkts = opt->nb_pkts;
+ t->nb_flows = opt->nb_flows;
+ t->result = EVT_TEST_FAILED;
+ t->opt = opt;
+ memcpy(t->sched_type_list, opt->sched_type_list,
+ sizeof(opt->sched_type_list));
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+
+void
+perf_test_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+
+ rte_free(test->test_priv);
+}
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
new file mode 100644
index 00000000..4956586c
--- /dev/null
+++ b/app/test-eventdev/test_perf_common.h
@@ -0,0 +1,169 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TEST_PERF_COMMON_
+#define _TEST_PERF_COMMON_
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+struct test_perf;
+
+struct worker_data {
+ uint64_t processed_pkts;
+ uint64_t latency;
+ uint8_t dev_id;
+ uint8_t port_id;
+ struct test_perf *t;
+} __rte_cache_aligned;
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ uint8_t queue_id;
+ struct test_perf *t;
+} __rte_cache_aligned;
+
+struct test_perf {
+ /* Don't change the offset of "done". The signal handler uses this
+ * memory location to terminate work on all lcores.
+ */
+ int done;
+ uint64_t outstand_pkts;
+ uint8_t nb_workers;
+ enum evt_test_result result;
+ uint32_t nb_flows;
+ uint64_t nb_pkts;
+ struct rte_mempool *pool;
+ struct prod_data prod[EVT_MAX_PORTS];
+ struct worker_data worker[EVT_MAX_PORTS];
+ struct evt_options *opt;
+ uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+} __rte_cache_aligned;
+
+struct perf_elt {
+ uint64_t timestamp;
+} __rte_cache_aligned;
+
+#define BURST_SIZE 16
+
+#define PERF_WORKER_INIT\
+ struct worker_data *w = arg;\
+ struct test_perf *t = w->t;\
+ struct evt_options *opt = t->opt;\
+ const uint8_t dev = w->dev_id;\
+ const uint8_t port = w->port_id;\
+ uint8_t *const sched_type_list = &t->sched_type_list[0];\
+ struct rte_mempool *const pool = t->pool;\
+ const uint8_t nb_stages = t->opt->nb_stages;\
+ const uint8_t laststage = nb_stages - 1;\
+ uint8_t cnt = 0;\
+ void *bufs[BURST_SIZE] __rte_cache_aligned;\
+ int const sz = RTE_DIM(bufs);\
+ if (opt->verbose_level > 1)\
+ printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
+ rte_lcore_id(), dev, port)
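
A minimal sketch of a worker built on this macro (example_worker is hypothetical, for illustration only; the real workers follow in test_perf_queue.c and test_perf_atq.c, and a single-stage loop is assumed here so some macro-declared locals go unused):

	static int
	example_worker(void *arg)
	{
		PERF_WORKER_INIT; /* declares w, t, opt, dev, port, pool, bufs, sz, cnt */
		struct rte_event ev;

		while (t->done == false) {
			if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
				rte_pause();
				continue;
			}
			/* treat every event as last stage in this sketch */
			cnt = perf_process_last_stage(pool, &ev, w, bufs, sz, cnt);
		}
		return 0;
	}
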
+
+static inline __attribute__((always_inline)) int
+perf_process_last_stage(struct rte_mempool *const pool,
+ struct rte_event *const ev, struct worker_data *const w,
+ void *bufs[], int const buf_sz, uint8_t count)
+{
+ bufs[count++] = ev->event_ptr;
+ w->processed_pkts++;
+ rte_smp_wmb();
+
+ if (unlikely(count == buf_sz)) {
+ count = 0;
+ rte_mempool_put_bulk(pool, bufs, buf_sz);
+ }
+ return count;
+}
+
+static inline __attribute__((always_inline)) uint8_t
+perf_process_last_stage_latency(struct rte_mempool *const pool,
+ struct rte_event *const ev, struct worker_data *const w,
+ void *bufs[], int const buf_sz, uint8_t count)
+{
+ uint64_t latency;
+ struct perf_elt *const m = ev->event_ptr;
+
+ bufs[count++] = ev->event_ptr;
+ w->processed_pkts++;
+
+ if (unlikely(count == buf_sz)) {
+ count = 0;
+ latency = rte_get_timer_cycles() - m->timestamp;
+ rte_mempool_put_bulk(pool, bufs, buf_sz);
+ } else {
+ latency = rte_get_timer_cycles() - m->timestamp;
+ }
+
+ w->latency += latency;
+ rte_smp_wmb();
+ return count;
+}
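
w->latency accumulates raw timer cycles; a sketch of how a results pass can convert the running total to wall-clock time (the conversion below is illustrative, not part of this file):

	/* average forward latency per packet, in nanoseconds (sketch) */
	if (w->processed_pkts) {
		uint64_t cycles_per_pkt = w->latency / w->processed_pkts;
		double ns_per_pkt = (double)cycles_per_pkt * 1E9 /
					rte_get_timer_hz();
	}
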
+
+
+static inline int
+perf_nb_event_ports(struct evt_options *opt)
+{
+ return evt_nr_active_lcores(opt->wlcores) +
+ evt_nr_active_lcores(opt->plcores);
+}
+
+int perf_test_result(struct evt_test *test, struct evt_options *opt);
+int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
+int perf_test_setup(struct evt_test *test, struct evt_options *opt);
+int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t stride, uint8_t nb_queues);
+int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
+void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+
+#endif /* _TEST_PERF_COMMON_ */
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
new file mode 100644
index 00000000..658c08a0
--- /dev/null
+++ b/app/test-eventdev/test_perf_queue.c
@@ -0,0 +1,288 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test_perf_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
+static inline int
+perf_queue_nb_event_queues(struct evt_options *opt)
+{
+ /* nb_queues = number of producers * number of stages */
+ return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
+}
+
+static inline __attribute__((always_inline)) void
+mark_fwd_latency(struct rte_event *const ev,
+ const uint8_t nb_stages)
+{
+ if (unlikely((ev->queue_id % nb_stages) == 0)) {
+ struct perf_elt *const m = ev->event_ptr;
+
+ m->timestamp = rte_get_timer_cycles();
+ }
+}
+
+static inline __attribute__((always_inline)) void
+fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
+ const uint8_t nb_stages)
+{
+ ev->queue_id++;
+ ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
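
A worked view of the queue_id arithmetic used by mark_fwd_latency(), fwd_event() and the last-stage checks in the workers below, assuming 2 producers and nb_stages = 3 (hence 6 queues):

	/*  queue_id:        0   1   2   3   4   5
	 *  stage (id % 3):  0   1   2   0   1   2
	 *  first stages (id % 3 == 0): queues 0 and 3 -> timestamp marked
	 *  last stages  (id % 3 == 2): queues 2 and 5 -> event consumed/released
	 *  fwd_event() advances an event one queue (one stage) at a time.
	 */
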
+
+static int
+perf_queue_worker(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+ if (enable_fwd_latency)
+ /* first queue in pipeline, mark timestamp to compute fwd latency */
+ mark_fwd_latency(&ev, nb_stages);
+
+ /* last stage in pipeline */
+ if (unlikely((ev.queue_id % nb_stages) == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(pool,
+ &ev, w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev, w, bufs, sz, cnt);
+ } else {
+ fwd_event(&ev, sched_type_list, nb_stages);
+ while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
+ rte_pause();
+ }
+ }
+ return 0;
+}
+
+static int
+perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ uint16_t i;
+ /* +1 so the prefetch of ev[i + 1] never reads past the array */
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (enable_fwd_latency) {
+ rte_prefetch0(ev[i+1].event_ptr);
+ /* first queue in pipeline.
+ * mark time stamp to compute fwd latency
+ */
+ mark_fwd_latency(&ev[i], nb_stages);
+ }
+ /* last stage in pipeline */
+ if (unlikely((ev[i].queue_id % nb_stages) ==
+ laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(
+ pool, &ev[i], w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev[i], w, bufs, sz, cnt);
+
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ fwd_event(&ev[i], sched_type_list, nb_stages);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const int fwd_latency = opt->fwd_latency;
+
+ /* allow compiler to optimize */
+ if (!burst && !fwd_latency)
+ return perf_queue_worker(arg, 0);
+ else if (!burst && fwd_latency)
+ return perf_queue_worker(arg, 1);
+ else if (burst && !fwd_latency)
+ return perf_queue_worker_burst(arg, 0);
+ else if (burst && fwd_latency)
+ return perf_queue_worker_burst(arg, 1);
+
+ rte_panic("invalid worker\n");
+}
+
+static int
+perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return perf_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t queue;
+ int nb_stages = opt->nb_stages;
+ int ret;
+
+ const struct rte_event_dev_config config = {
+ .nb_event_queues = perf_queue_nb_event_queues(opt),
+ .nb_event_ports = perf_nb_event_ports(opt),
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+
+ ret = rte_event_dev_configure(opt->dev_id, &config);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
+ q_conf.event_queue_cfg = evt_sched_type2queue_cfg
+ (opt->sched_type_list[queue % nb_stages]);
+
+ if (opt->q_priority) {
+ uint8_t stage_pos = queue % nb_stages;
+ /* Configure event queues(stage 0 to stage n) with
+ * RTE_EVENT_DEV_PRIORITY_LOWEST to
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST.
+ */
+ uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
+ (nb_stages - 1);
+ /* Higher prio for the queues closer to last stage */
+ q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
+ (step * stage_pos);
+ }
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
+ perf_queue_nb_event_queues(opt));
+ if (ret)
+ return ret;
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
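
Worked numbers for the q_priority branch in the queue-setup loop above, assuming nb_stages = 3 (RTE_EVENT_DEV_PRIORITY_LOWEST is 255; note the step division also relies on nb_stages > 1, since nb_stages == 1 would divide by zero):

	/*  step = 255 / (3 - 1) = 127
	 *  stage 0: 255 - 127 * 0 = 255  (lowest priority)
	 *  stage 1: 255 - 127 * 1 = 128
	 *  stage 2: 255 - 127 * 2 =   1  (close to RTE_EVENT_DEV_PRIORITY_HIGHEST)
	 */
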
+
+static void
+perf_queue_opt_dump(struct evt_options *opt)
+{
+ evt_dump_fwd_latency(opt);
+ perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
+}
+
+static int
+perf_queue_opt_check(struct evt_options *opt)
+{
+ return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
+}
+
+static bool
+perf_queue_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
+ dev_info.max_event_ports < perf_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ perf_queue_nb_event_queues(opt),
+ dev_info.max_event_queues,
+ perf_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops perf_queue = {
+ .cap_check = perf_queue_capability_check,
+ .opt_check = perf_queue_opt_check,
+ .opt_dump = perf_queue_opt_dump,
+ .test_setup = perf_test_setup,
+ .mempool_setup = perf_mempool_setup,
+ .eventdev_setup = perf_queue_eventdev_setup,
+ .launch_lcores = perf_queue_launch_lcores,
+ .eventdev_destroy = perf_eventdev_destroy,
+ .mempool_destroy = perf_mempool_destroy,
+ .test_result = perf_test_result,
+ .test_destroy = perf_test_destroy,
+};
+
+EVT_TEST_REGISTER(perf_queue);
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index 35ecee9f..c36be19f 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -73,6 +73,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_I40E_PMD),y)
LDLIBS += -lrte_pmd_i40e
endif
+ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD),y)
+LDLIBS += -lrte_pmd_bnxt
+endif
+
ifeq ($(CONFIG_RTE_LIBRTE_PMD_XENVIRT),y)
LDLIBS += -lrte_pmd_xenvirt
endif
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0afac68c..cd8c3585 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -36,7 +36,6 @@
#include <errno.h>
#include <stdio.h>
#include <stdint.h>
-#include <stdarg.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>
@@ -76,6 +75,7 @@
#include <rte_devargs.h>
#include <rte_eth_ctrl.h>
#include <rte_flow.h>
+#include <rte_gro.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
@@ -87,6 +87,7 @@
#include <cmdline.h>
#ifdef RTE_LIBRTE_PMD_BOND
#include <rte_eth_bond.h>
+#include <rte_eth_bond_8023ad.h>
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
@@ -94,6 +95,9 @@
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+#include <rte_pmd_bnxt.h>
+#endif
#include "testpmd.h"
static struct cmdline *testpmd_cl;
@@ -218,6 +222,9 @@ static void cmd_help_long_parsed(void *parsed_result,
"ddp get list (port_id)\n"
" Get ddp profile info list\n\n"
+ "ddp get info (profile_path)\n"
+ " Get ddp profile information.\n\n"
+
"show vf stats (port_id) (vf_id)\n"
" Display a VF's statistics.\n\n"
@@ -278,18 +285,15 @@ static void cmd_help_long_parsed(void *parsed_result,
"set tx loopback (port_id) (on|off)\n"
" Enable or disable tx loopback.\n\n"
-#ifdef RTE_LIBRTE_IXGBE_PMD
"set all queues drop (port_id) (on|off)\n"
" Set drop enable bit for all queues.\n\n"
"set vf split drop (port_id) (vf_id) (on|off)\n"
" Set split drop enable bit for a VF from the PF.\n\n"
-#endif
"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
" Set MAC antispoof for a VF from the PF.\n\n"
-#ifdef RTE_LIBRTE_IXGBE_PMD
"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
" Enable MACsec offload.\n\n"
@@ -301,7 +305,6 @@ static void cmd_help_long_parsed(void *parsed_result,
"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
" Configure MACsec secure association (SA).\n\n"
-#endif
"set vf broadcast (port_id) (vf_id) (on|off)\n"
" Set VF broadcast for a VF from the PF.\n\n"
@@ -420,6 +423,14 @@ static void cmd_help_long_parsed(void *parsed_result,
"tso show (portid)"
" Display the status of TCP Segmentation Offload.\n\n"
+ "gro (on|off) (port_id)"
+ " Enable or disable Generic Receive Offload in"
+ " csum forwarding engine.\n\n"
+
+ "gro set (max_flow_num) (max_item_num_per_flow) (port_id)\n"
+ " Set max flow number and max packet number per-flow"
+ " for GRO.\n\n"
+
"set fwd (%s)\n"
" Set packet forwarding mode.\n\n"
@@ -523,7 +534,6 @@ static void cmd_help_long_parsed(void *parsed_result,
" Flush (default) or don't flush RX streams before"
" forwarding. Mainly used with PCAP drivers.\n\n"
- #ifdef RTE_NIC_BYPASS
"set bypass mode (normal|bypass|isolate) (port_id)\n"
" Set the bypass mode for the lowest port on bypass enabled"
" NIC.\n\n"
@@ -546,7 +556,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"show bypass config (port_id)\n"
" Show the bypass configuration for a bypass enabled NIC"
" using the lowest port on the NIC.\n\n"
-#endif
+
#ifdef RTE_LIBRTE_PMD_BOND
"create bonded device (mode) (socket)\n"
" Create a new bonded device with specific bonding mode and socket.\n\n"
@@ -569,11 +579,18 @@ static void cmd_help_long_parsed(void *parsed_result,
"set bonding mac_addr (port_id) (address)\n"
" Set the MAC address of a bonded device.\n\n"
+ "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)"
+ " Set Aggregation mode for IEEE802.3AD (mode 4)"
+
"set bonding xmit_balance_policy (port_id) (l2|l23|l34)\n"
" Set the transmit balance policy for bonded device running in balance mode.\n\n"
"set bonding mon_period (port_id) (value)\n"
" Set the bonding link status monitoring polling period in ms.\n\n"
+
+ "set bonding lacp dedicated_queues <port_id> (enable|disable)\n"
+ " Enable/disable dedicated queues for LACP control traffic.\n\n"
+
#endif
"set link-up port (port_id)\n"
" Set link up for a port.\n\n"
@@ -602,9 +619,12 @@ static void cmd_help_long_parsed(void *parsed_result,
"E-tag set filter del e-tag-id (value) port (port_id)\n"
" Delete an E-tag forwarding filter on a port\n\n"
- "ddp add (port_id) (profile_path)\n"
+ "ddp add (port_id) (profile_path[,output_path])\n"
" Load a profile package on a port\n\n"
+ "ddp del (port_id) (profile_path)\n"
+ " Delete a profile package from a port\n\n"
+
"ptype mapping get (port_id) (valid_only)\n"
" Get ptype mapping on a port\n\n"
@@ -904,6 +924,10 @@ static void cmd_help_long_parsed(void *parsed_result,
"flow list {port_id} [group {group_id}] [...]\n"
" List existing flow rules sorted by priority,"
" filtered by group identifiers.\n\n"
+
+ "flow isolate {port_id} {boolean}\n"
+ " Restrict ingress traffic to the defined"
+ " flow rules\n\n"
);
}
}
@@ -3825,6 +3849,120 @@ cmdline_parse_inst_t cmd_tunnel_tso_show = {
},
};
+/* *** SET GRO FOR A PORT *** */
+struct cmd_gro_result {
+ cmdline_fixed_string_t cmd_keyword;
+ cmdline_fixed_string_t mode;
+ uint8_t port_id;
+};
+
+static void
+cmd_enable_gro_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gro_result *res;
+
+ res = parsed_result;
+ setup_gro(res->mode, res->port_id);
+}
+
+cmdline_parse_token_string_t cmd_gro_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_result,
+ cmd_keyword, "gro");
+cmdline_parse_token_string_t cmd_gro_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_result,
+ mode, "on#off");
+cmdline_parse_token_num_t cmd_gro_pid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_enable_gro = {
+ .f = cmd_enable_gro_parsed,
+ .data = NULL,
+ .help_str = "gro (on|off) (port_id)",
+ .tokens = {
+ (void *)&cmd_gro_keyword,
+ (void *)&cmd_gro_mode,
+ (void *)&cmd_gro_pid,
+ NULL,
+ },
+};
+
+/* *** SET MAX FLOW NUMBER AND ITEM NUM PER FLOW FOR GRO *** */
+struct cmd_gro_set_result {
+ cmdline_fixed_string_t gro;
+ cmdline_fixed_string_t mode;
+ uint16_t flow_num;
+ uint16_t item_num_per_flow;
+ uint8_t port_id;
+};
+
+static void
+cmd_gro_set_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gro_set_result *res = parsed_result;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+ if (test_done == 0) {
+ printf("Before set GRO flow_num and item_num_per_flow,"
+ " please stop forwarding first\n");
+ return;
+ }
+
+ if (!strcmp(res->mode, "set")) {
+ if (res->flow_num == 0)
+ printf("Invalid flow number. Revert to default value:"
+ " %u.\n", GRO_DEFAULT_FLOW_NUM);
+ else
+ gro_ports[res->port_id].param.max_flow_num =
+ res->flow_num;
+
+ if (res->item_num_per_flow == 0)
+ printf("Invalid item number per-flow. Revert"
+ " to default value:%u.\n",
+ GRO_DEFAULT_ITEM_NUM_PER_FLOW);
+ else
+ gro_ports[res->port_id].param.max_item_per_flow =
+ res->item_num_per_flow;
+ }
+}
+
+cmdline_parse_token_string_t cmd_gro_set_gro =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_set_result,
+ gro, "gro");
+cmdline_parse_token_string_t cmd_gro_set_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_set_result,
+ mode, "set");
+cmdline_parse_token_num_t cmd_gro_set_flow_num =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
+ flow_num, UINT16);
+cmdline_parse_token_num_t cmd_gro_set_item_num_per_flow =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
+ item_num_per_flow, UINT16);
+cmdline_parse_token_num_t cmd_gro_set_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
+ port_id, UINT8);
+
+cmdline_parse_inst_t cmd_gro_set = {
+ .f = cmd_gro_set_parsed,
+ .data = NULL,
+ .help_str = "gro set <max_flow_num> <max_item_num_per_flow> "
+ "<port_id>: set max flow number and max packet number per-flow "
+ "for GRO",
+ .tokens = {
+ (void *)&cmd_gro_set_gro,
+ (void *)&cmd_gro_set_mode,
+ (void *)&cmd_gro_set_flow_num,
+ (void *)&cmd_gro_set_item_num_per_flow,
+ (void *)&cmd_gro_set_portid,
+ NULL,
+ },
+};
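
A short interactive sequence exercising the two GRO commands registered above (port number and limit values are illustrative; per the checks above, "gro set" must run while forwarding is stopped):

	testpmd> set fwd csum
	testpmd> gro set 32 64 0
	testpmd> gro on 0
	testpmd> start
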
+
/* *** ENABLE/DISABLE FLUSH ON RX STREAMS *** */
struct cmd_set_flush_rx {
cmdline_fixed_string_t set;
@@ -3904,7 +4042,6 @@ cmdline_parse_inst_t cmd_set_link_check = {
},
};
-#ifdef RTE_NIC_BYPASS
/* *** SET NIC BYPASS MODE *** */
struct cmd_set_bypass_mode_result {
cmdline_fixed_string_t set;
@@ -3921,19 +4058,23 @@ cmd_set_bypass_mode_parsed(void *parsed_result,
{
struct cmd_set_bypass_mode_result *res = parsed_result;
portid_t port_id = res->port_id;
- uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL;
+ int32_t rc = -EINVAL;
+
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+ uint32_t bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
if (!strcmp(res->value, "bypass"))
- bypass_mode = RTE_BYPASS_MODE_BYPASS;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_BYPASS;
else if (!strcmp(res->value, "isolate"))
- bypass_mode = RTE_BYPASS_MODE_ISOLATE;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE;
else
- bypass_mode = RTE_BYPASS_MODE_NORMAL;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
/* Set the bypass mode for the relevant port. */
- if (0 != rte_eth_dev_bypass_state_set(port_id, &bypass_mode)) {
+ rc = rte_pmd_ixgbe_bypass_state_set(port_id, &bypass_mode);
+#endif
+ if (rc != 0)
printf("\t Failed to set bypass mode for port = %d.\n", port_id);
- }
}
cmdline_parse_token_string_t cmd_setbypass_mode_set =
@@ -3983,51 +4124,57 @@ cmd_set_bypass_event_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- int32_t rc;
+ int32_t rc = -EINVAL;
struct cmd_set_bypass_event_result *res = parsed_result;
portid_t port_id = res->port_id;
- uint32_t bypass_event = RTE_BYPASS_EVENT_NONE;
- uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL;
+
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+ uint32_t bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_NONE;
+ uint32_t bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
if (!strcmp(res->event_value, "timeout"))
- bypass_event = RTE_BYPASS_EVENT_TIMEOUT;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT;
else if (!strcmp(res->event_value, "os_on"))
- bypass_event = RTE_BYPASS_EVENT_OS_ON;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON;
else if (!strcmp(res->event_value, "os_off"))
- bypass_event = RTE_BYPASS_EVENT_OS_OFF;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF;
else if (!strcmp(res->event_value, "power_on"))
- bypass_event = RTE_BYPASS_EVENT_POWER_ON;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON;
else if (!strcmp(res->event_value, "power_off"))
- bypass_event = RTE_BYPASS_EVENT_POWER_OFF;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF;
else
- bypass_event = RTE_BYPASS_EVENT_NONE;
+ bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_NONE;
if (!strcmp(res->mode_value, "bypass"))
- bypass_mode = RTE_BYPASS_MODE_BYPASS;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_BYPASS;
else if (!strcmp(res->mode_value, "isolate"))
- bypass_mode = RTE_BYPASS_MODE_ISOLATE;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE;
else
- bypass_mode = RTE_BYPASS_MODE_NORMAL;
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
/* Set the watchdog timeout. */
- if (bypass_event == RTE_BYPASS_EVENT_TIMEOUT) {
+ if (bypass_event == RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT) {
rc = -EINVAL;
- if (!RTE_BYPASS_TMT_VALID(bypass_timeout) ||
- (rc = rte_eth_dev_wd_timeout_store(port_id,
- bypass_timeout)) != 0) {
+ if (RTE_PMD_IXGBE_BYPASS_TMT_VALID(bypass_timeout)) {
+ rc = rte_pmd_ixgbe_bypass_wd_timeout_store(port_id,
+ bypass_timeout);
+ }
+ if (rc != 0) {
printf("Failed to set timeout value %u "
- "for port %d, errto code: %d.\n",
- bypass_timeout, port_id, rc);
+ "for port %d, errto code: %d.\n",
+ bypass_timeout, port_id, rc);
}
}
/* Set the bypass event to transition to bypass mode. */
- if (0 != rte_eth_dev_bypass_event_store(port_id,
- bypass_event, bypass_mode)) {
- printf("\t Failed to set bypass event for port = %d.\n", port_id);
- }
+ rc = rte_pmd_ixgbe_bypass_event_store(port_id, bypass_event,
+ bypass_mode);
+#endif
+ if (rc != 0)
+ printf("\t Failed to set bypass event for port = %d.\n",
+ port_id);
}
cmdline_parse_token_string_t cmd_setbypass_event_set =
@@ -4084,24 +4231,26 @@ cmd_set_bypass_timeout_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_set_bypass_timeout_result *res = parsed_result;
+ __rte_unused struct cmd_set_bypass_timeout_result *res = parsed_result;
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
if (!strcmp(res->value, "1.5"))
- bypass_timeout = RTE_BYPASS_TMT_1_5_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC;
else if (!strcmp(res->value, "2"))
- bypass_timeout = RTE_BYPASS_TMT_2_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_2_SEC;
else if (!strcmp(res->value, "3"))
- bypass_timeout = RTE_BYPASS_TMT_3_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_3_SEC;
else if (!strcmp(res->value, "4"))
- bypass_timeout = RTE_BYPASS_TMT_4_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_4_SEC;
else if (!strcmp(res->value, "8"))
- bypass_timeout = RTE_BYPASS_TMT_8_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_8_SEC;
else if (!strcmp(res->value, "16"))
- bypass_timeout = RTE_BYPASS_TMT_16_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_16_SEC;
else if (!strcmp(res->value, "32"))
- bypass_timeout = RTE_BYPASS_TMT_32_SEC;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_32_SEC;
else
- bypass_timeout = RTE_BYPASS_TMT_OFF;
+ bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
+#endif
}
cmdline_parse_token_string_t cmd_setbypass_timeout_set =
@@ -4145,17 +4294,19 @@ cmd_show_bypass_config_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_show_bypass_config_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ int rc = -EINVAL;
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
uint32_t event_mode;
uint32_t bypass_mode;
- portid_t port_id = res->port_id;
uint32_t timeout = bypass_timeout;
int i;
- static const char * const timeouts[RTE_BYPASS_TMT_NUM] =
+ static const char * const timeouts[RTE_PMD_IXGBE_BYPASS_TMT_NUM] =
{"off", "1.5", "2", "3", "4", "8", "16", "32"};
- static const char * const modes[RTE_BYPASS_MODE_NUM] =
+ static const char * const modes[RTE_PMD_IXGBE_BYPASS_MODE_NUM] =
{"UNKNOWN", "normal", "bypass", "isolate"};
- static const char * const events[RTE_BYPASS_EVENT_NUM] = {
+ static const char * const events[RTE_PMD_IXGBE_BYPASS_EVENT_NUM] = {
"NONE",
"OS/board on",
"power supply on",
@@ -4165,37 +4316,41 @@ cmd_show_bypass_config_parsed(void *parsed_result,
int num_events = (sizeof events) / (sizeof events[0]);
/* Display the bypass mode.*/
- if (0 != rte_eth_dev_bypass_state_show(port_id, &bypass_mode)) {
+ if (rte_pmd_ixgbe_bypass_state_show(port_id, &bypass_mode) != 0) {
printf("\tFailed to get bypass mode for port = %d\n", port_id);
return;
}
else {
- if (!RTE_BYPASS_MODE_VALID(bypass_mode))
- bypass_mode = RTE_BYPASS_MODE_NONE;
+ if (!RTE_PMD_IXGBE_BYPASS_MODE_VALID(bypass_mode))
+ bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NONE;
printf("\tbypass mode = %s\n", modes[bypass_mode]);
}
/* Display the bypass timeout.*/
- if (!RTE_BYPASS_TMT_VALID(timeout))
- timeout = RTE_BYPASS_TMT_OFF;
+ if (!RTE_PMD_IXGBE_BYPASS_TMT_VALID(timeout))
+ timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
printf("\tbypass timeout = %s\n", timeouts[timeout]);
/* Display the bypass events and associated modes. */
- for (i = RTE_BYPASS_EVENT_START; i < num_events; i++) {
+ for (i = RTE_PMD_IXGBE_BYPASS_EVENT_START; i < num_events; i++) {
- if (0 != rte_eth_dev_bypass_event_show(port_id, i, &event_mode)) {
+ if (rte_pmd_ixgbe_bypass_event_show(port_id, i, &event_mode)) {
printf("\tFailed to get bypass mode for event = %s\n",
events[i]);
} else {
- if (!RTE_BYPASS_MODE_VALID(event_mode))
- event_mode = RTE_BYPASS_MODE_NONE;
+ if (!RTE_PMD_IXGBE_BYPASS_MODE_VALID(event_mode))
+ event_mode = RTE_PMD_IXGBE_BYPASS_MODE_NONE;
printf("\tbypass event: %-16s = %s\n", events[i],
modes[event_mode]);
}
}
+#endif
+ if (rc != 0)
+ printf("\tFailed to get bypass configuration for port = %d\n",
+ port_id);
}
cmdline_parse_token_string_t cmd_showbypass_config_show =
@@ -4224,7 +4379,6 @@ cmdline_parse_inst_t cmd_show_bypass_config = {
NULL,
},
};
-#endif
#ifdef RTE_LIBRTE_PMD_BOND
/* *** SET BONDING MODE *** */
@@ -4279,6 +4433,85 @@ cmdline_parse_inst_t cmd_set_bonding_mode = {
}
};
+/* *** SET BONDING SLOW_QUEUE SW/HW *** */
+struct cmd_set_bonding_lacp_dedicated_queues_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t lacp;
+ cmdline_fixed_string_t dedicated_queues;
+ uint8_t port_id;
+ cmdline_fixed_string_t mode;
+};
+
+static void cmd_set_bonding_lacp_dedicated_queues_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bonding_lacp_dedicated_queues_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ struct rte_port *port;
+
+ port = &ports[port_id];
+
+ /* Check that the port is stopped */
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Please stop port %d first\n", port_id);
+ return;
+ }
+
+ if (!strcmp(res->mode, "enable")) {
+ if (rte_eth_bond_8023ad_dedicated_queues_enable(port_id) == 0)
+ printf("Dedicate queues for LACP control packets"
+ " enabled\n");
+ else
+ printf("Enabling dedicate queues for LACP control "
+ "packets on port %d failed\n", port_id);
+ } else if (!strcmp(res->mode, "disable")) {
+ if (rte_eth_bond_8023ad_dedicated_queues_disable(port_id) == 0)
+ printf("Dedicated queues for LACP control packets "
+ "disabled\n");
+ else
+ printf("Disabling dedicated queues for LACP control "
+ "traffic on port %d failed\n", port_id);
+ }
+}
+
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_set =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ bonding, "bonding");
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_lacp =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ lacp, "lacp");
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_dedicated_queues =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ dedicated_queues, "dedicated_queues");
+cmdline_parse_token_num_t cmd_setbonding_lacp_dedicated_queues_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_mode =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
+ mode, "enable#disable");
+
+cmdline_parse_inst_t cmd_set_lacp_dedicated_queues = {
+ .f = cmd_set_bonding_lacp_dedicated_queues_parsed,
+ .help_str = "set bonding lacp dedicated_queues <port_id> "
+ "enable|disable: "
+ "Enable/disable dedicated queues for LACP control traffic for port_id",
+ .data = NULL,
+ .tokens = {
+ (void *)&cmd_setbonding_lacp_dedicated_queues_set,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_bonding,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_lacp,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_dedicated_queues,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_port_id,
+ (void *)&cmd_setbonding_lacp_dedicated_queues_mode,
+ NULL
+ }
+};
+
/* *** SET BALANCE XMIT POLICY *** */
struct cmd_set_bonding_balance_xmit_policy_result {
cmdline_fixed_string_t set;
@@ -4359,7 +4592,7 @@ static void cmd_show_bonding_config_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_show_bonding_config_result *res = parsed_result;
- int bonding_mode;
+ int bonding_mode, agg_mode;
uint8_t slaves[RTE_MAX_ETHPORTS];
int num_slaves, num_active_slaves;
int primary_id;
@@ -4400,6 +4633,23 @@ static void cmd_show_bonding_config_parsed(void *parsed_result,
}
}
+ if (bonding_mode == BONDING_MODE_8023AD) {
+ agg_mode = rte_eth_bond_8023ad_agg_selection_get(port_id);
+ printf("\tIEEE802.3AD Aggregator Mode: ");
+ switch (agg_mode) {
+ case AGG_BANDWIDTH:
+ printf("bandwidth");
+ break;
+ case AGG_STABLE:
+ printf("stable");
+ break;
+ case AGG_COUNT:
+ printf("count");
+ break;
+ }
+ printf("\n");
+ }
+
num_slaves = rte_eth_bond_slaves_get(port_id, slaves, RTE_MAX_ETHPORTS);
if (num_slaves < 0) {
@@ -4546,7 +4796,7 @@ static void cmd_add_bonding_slave_parsed(void *parsed_result,
portid_t master_port_id = res->port_id;
portid_t slave_port_id = res->slave_id;
- /* Set the primary slave for a bonded device. */
+ /* Add the slave to a bonded device. */
if (0 != rte_eth_bond_slave_add(master_port_id, slave_port_id)) {
printf("\t Failed to add slave %d to master port = %d.\n",
slave_port_id, master_port_id);
@@ -4604,7 +4854,7 @@ static void cmd_remove_bonding_slave_parsed(void *parsed_result,
portid_t master_port_id = res->port_id;
portid_t slave_port_id = res->slave_id;
- /* Set the primary slave for a bonded device. */
+ /* Remove the slave from a bonded device. */
if (0 != rte_eth_bond_slave_remove(master_port_id, slave_port_id)) {
printf("\t Failed to remove slave %d from master port = %d.\n",
slave_port_id, master_port_id);
@@ -4669,7 +4919,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result,
return;
}
- snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "net_bond_testpmd_%d",
+ snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "net_bonding_testpmd_%d",
bond_dev_num++);
/* Create a new bonded device. */
@@ -4832,6 +5082,77 @@ cmdline_parse_inst_t cmd_set_bond_mon_period = {
}
};
+
+
+struct cmd_set_bonding_agg_mode_policy_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t bonding;
+ cmdline_fixed_string_t agg_mode;
+ uint8_t port_num;
+ cmdline_fixed_string_t policy;
+};
+
+
+static void
+cmd_set_bonding_agg_mode(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_bonding_agg_mode_policy_result *res = parsed_result;
+ uint8_t policy = AGG_BANDWIDTH;
+
+ if (res->port_num >= nb_ports) {
+ printf("Port id %d must be less than %d\n",
+ res->port_num, nb_ports);
+ return;
+ }
+
+ if (!strcmp(res->policy, "bandwidth"))
+ policy = AGG_BANDWIDTH;
+ else if (!strcmp(res->policy, "stable"))
+ policy = AGG_STABLE;
+ else if (!strcmp(res->policy, "count"))
+ policy = AGG_COUNT;
+
+ rte_eth_bond_8023ad_agg_selection_set(res->port_num, policy);
+}
+
+
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_bonding =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ bonding, "bonding");
+
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_agg_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ agg_mode, "agg_mode");
+
+cmdline_parse_token_num_t cmd_set_bonding_agg_mode_portnum =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
+ port_num, UINT8);
+
+cmdline_parse_token_string_t cmd_set_bonding_agg_mode_policy_string =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_bonding_agg_mode_policy_result,
+ policy, "stable#bandwidth#count");
+
+cmdline_parse_inst_t cmd_set_bonding_agg_mode_policy = {
+ .f = cmd_set_bonding_agg_mode,
+ .data = (void *) 0,
+ .help_str = "set bonding mode IEEE802.3AD aggregator policy <port_id> <agg_name>",
+ .tokens = {
+ (void *)&cmd_set_bonding_agg_mode_set,
+ (void *)&cmd_set_bonding_agg_mode_bonding,
+ (void *)&cmd_set_bonding_agg_mode_agg_mode,
+ (void *)&cmd_set_bonding_agg_mode_portnum,
+ (void *)&cmd_set_bonding_agg_mode_policy_string,
+ NULL
+ }
+};
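
Given the token list above ("set", "bonding", "agg_mode", port number, policy), an example invocation would be (port 0, policy names taken from the token string):

	testpmd> set bonding agg_mode 0 stable
	testpmd> show bonding config 0
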
+
+
#endif /* RTE_LIBRTE_PMD_BOND */
/* *** SET FORWARDING MODE *** */
@@ -6720,7 +7041,6 @@ cmdline_parse_inst_t cmd_set_vf_macvlan_filter = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* *** CONFIGURE VF TRAFFIC CONTROL *** */
struct cmd_set_vf_traffic {
cmdline_fixed_string_t set;
@@ -6799,7 +7119,7 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- int ret;
+ int ret = -ENOTSUP;
uint16_t rx_mode = 0;
struct cmd_set_vf_rxmode *res = parsed_result;
@@ -6815,7 +7135,16 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
}
- ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id, rx_mode, (uint8_t)is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id,
+ rx_mode, (uint8_t)is_on);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id,
+ rx_mode, (uint8_t)is_on);
+#endif
if (ret < 0)
printf("bad VF receive mode parameter, return code = %d \n",
ret);
@@ -6863,7 +7192,6 @@ cmdline_parse_inst_t cmd_set_vf_rxmode = {
NULL,
},
};
-#endif
/* *** ADD MAC ADDRESS FILTER FOR A VF OF A PORT *** */
struct cmd_vf_mac_addr_result {
@@ -6961,6 +7289,11 @@ cmd_vf_rx_vlan_filter_parsed(void *parsed_result,
ret = rte_pmd_i40e_set_vf_vlan_filter(res->port_id,
res->vlan_id, res->vf_mask, is_add);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_filter(res->port_id,
+ res->vlan_id, res->vf_mask, is_add);
+#endif
switch (ret) {
case 0:
@@ -7086,7 +7419,6 @@ cmdline_parse_inst_t cmd_queue_rate_limit = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* *** SET RATE LIMIT FOR A VF OF A PORT *** */
struct cmd_vf_rate_limit_result {
cmdline_fixed_string_t set;
@@ -7165,7 +7497,6 @@ cmdline_parse_inst_t cmd_vf_rate_limit = {
NULL,
},
};
-#endif
/* *** ADD TUNNEL FILTER OF A PORT *** */
struct cmd_tunnel_filter_result {
@@ -11012,6 +11343,11 @@ cmd_set_vf_vlan_anti_spoof_parsed(
ret = rte_pmd_i40e_set_vf_vlan_anti_spoof(res->port_id,
res->vf_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11113,6 +11449,11 @@ cmd_set_vf_mac_anti_spoof_parsed(
ret = rte_pmd_i40e_set_vf_mac_anti_spoof(res->port_id,
res->vf_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_mac_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11214,6 +11555,11 @@ cmd_set_vf_vlan_stripq_parsed(
ret = rte_pmd_i40e_set_vf_vlan_stripq(res->port_id,
res->vf_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_stripq(res->port_id,
+ res->vf_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11313,6 +11659,11 @@ cmd_set_vf_vlan_insert_parsed(
ret = rte_pmd_i40e_set_vf_vlan_insert(res->port_id, res->vf_id,
res->vlan_id);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_vlan_insert(res->port_id, res->vf_id,
+ res->vlan_id);
+#endif
switch (ret) {
case 0:
@@ -11402,6 +11753,10 @@ cmd_set_tx_loopback_parsed(
if (ret == -ENOTSUP)
ret = rte_pmd_i40e_set_tx_loopback(res->port_id, is_on);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on);
+#endif
switch (ret) {
case 0:
@@ -11434,7 +11789,6 @@ cmdline_parse_inst_t cmd_set_tx_loopback = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* all queues drop enable configuration */
/* Common result structure for all queues drop enable */
@@ -11480,13 +11834,20 @@ cmd_set_all_queues_drop_en_parsed(
__attribute__((unused)) void *data)
{
struct cmd_all_queues_drop_en_result *res = parsed_result;
- int ret = 0;
+ int ret = -ENOTSUP;
int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_all_queues_drop_en(res->port_id, is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_all_queues_drop_en(res->port_id, is_on);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_all_queues_drop_en(res->port_id, is_on);
+#endif
switch (ret) {
case 0:
break;
@@ -11569,14 +11930,16 @@ cmd_set_vf_split_drop_en_parsed(
__attribute__((unused)) void *data)
{
struct cmd_vf_split_drop_en_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_set_vf_split_drop_en(res->port_id, res->vf_id,
is_on);
+#endif
switch (ret) {
case 0:
break;
@@ -11586,6 +11949,9 @@ cmd_set_vf_split_drop_en_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", res->port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11606,7 +11972,6 @@ cmdline_parse_inst_t cmd_set_vf_split_drop_en = {
NULL,
},
};
-#endif
/* vf mac address configuration */
@@ -11673,6 +12038,11 @@ cmd_set_vf_mac_addr_parsed(
ret = rte_pmd_i40e_set_vf_mac_addr(res->port_id, res->vf_id,
&res->mac_addr);
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_mac_addr(res->port_id, res->vf_id,
+ &res->mac_addr);
+#endif
switch (ret) {
case 0:
@@ -11707,7 +12077,6 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* MACsec configuration */
/* Common result structure for MACsec offload enable */
@@ -11768,7 +12137,7 @@ cmd_set_macsec_offload_on_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_offload_on_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
portid_t port_id = res->port_id;
int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
@@ -11777,7 +12146,11 @@ cmd_set_macsec_offload_on_parsed(
return;
ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+#endif
+ RTE_SET_USED(en);
+ RTE_SET_USED(rp);
switch (ret) {
case 0:
@@ -11785,6 +12158,9 @@ cmd_set_macsec_offload_on_parsed(
case -ENODEV:
printf("invalid port_id %d\n", port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11847,14 +12223,16 @@ cmd_set_macsec_offload_off_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_offload_off_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
portid_t port_id = res->port_id;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_disable(port_id);
+#endif
switch (ret) {
case 0:
@@ -11862,6 +12240,9 @@ cmd_set_macsec_offload_off_parsed(
case -ENODEV:
printf("invalid port_id %d\n", port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11929,20 +12310,27 @@ cmd_set_macsec_sc_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_sc_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = is_tx ?
rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
res->mac.addr_bytes) :
rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
res->mac.addr_bytes, res->pi);
+#endif
+ RTE_SET_USED(is_tx);
+
switch (ret) {
case 0:
break;
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", res->port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -12022,7 +12410,7 @@ cmd_set_macsec_sa_parsed(
__attribute__((unused)) void *data)
{
struct cmd_macsec_sa_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
uint8_t key[16] = { 0 };
uint8_t xdgt0;
@@ -12044,11 +12432,16 @@ cmd_set_macsec_sa_parsed(
key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
}
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = is_tx ?
rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
res->idx, res->an, res->pn, key) :
rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
res->idx, res->an, res->pn, key);
+#endif
+ RTE_SET_USED(is_tx);
+ RTE_SET_USED(key);
+
switch (ret) {
case 0:
break;
@@ -12058,6 +12451,9 @@ cmd_set_macsec_sa_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("not supported on port %d\n", res->port_id);
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -12080,7 +12476,6 @@ cmdline_parse_inst_t cmd_set_macsec_sa = {
NULL,
},
};
-#endif
/* VF unicast promiscuous mode configuration */
@@ -12860,6 +13255,9 @@ cmd_ddp_add_parsed(
struct cmd_ddp_add_result *res = parsed_result;
uint8_t *buff;
uint32_t size;
+ char *filepath;
+ char *file_fld[2];
+ int file_num;
int ret = -ENOTSUP;
 if (res->port_id >= nb_ports) {
@@ -12872,9 +13270,18 @@ cmd_ddp_add_parsed(
return;
}
- buff = open_ddp_package_file(res->filepath, &size);
- if (!buff)
+ filepath = strdup(res->filepath);
+ if (filepath == NULL) {
+ printf("Failed to allocate memory\n");
return;
+ }
+ file_num = rte_strsplit(filepath, strlen(filepath), file_fld, 2, ',');
+
+ buff = open_ddp_package_file(file_fld[0], &size);
+ if (!buff) {
+ free((void *)filepath);
+ return;
+ }
#ifdef RTE_LIBRTE_I40E_PMD
if (ret == -ENOTSUP)
@@ -12883,18 +13290,21 @@ cmd_ddp_add_parsed(
RTE_PMD_I40E_PKG_OP_WR_ADD);
#endif
- if (ret < 0)
- printf("Failed to load profile.\n");
- else if (ret > 0)
+ if (ret == -EEXIST)
printf("Profile has already existed.\n");
+ else if (ret < 0)
+ printf("Failed to load profile.\n");
+ else if (file_num == 2)
+ save_ddp_package_file(file_fld[1], buff, size);
close_ddp_package_file(buff);
+ free((void *)filepath);
}
cmdline_parse_inst_t cmd_ddp_add = {
.f = cmd_ddp_add_parsed,
.data = NULL,
- .help_str = "ddp add <port_id> <profile_path>",
+ .help_str = "ddp add <port_id> <profile_path[,output_path]>",
.tokens = {
(void *)&cmd_ddp_add_ddp,
(void *)&cmd_ddp_add_add,
@@ -12904,6 +13314,203 @@ cmdline_parse_inst_t cmd_ddp_add = {
},
};
+/* Delete dynamic device personalization*/
+struct cmd_ddp_del_result {
+ cmdline_fixed_string_t ddp;
+ cmdline_fixed_string_t del;
+ uint8_t port_id;
+ char filepath[];
+};
+
+cmdline_parse_token_string_t cmd_ddp_del_ddp =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, ddp, "ddp");
+cmdline_parse_token_string_t cmd_ddp_del_del =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, del, "del");
+cmdline_parse_token_num_t cmd_ddp_del_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_ddp_del_result, port_id, UINT8);
+cmdline_parse_token_string_t cmd_ddp_del_filepath =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, filepath, NULL);
+
+static void
+cmd_ddp_del_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ddp_del_result *res = parsed_result;
+ uint8_t *buff;
+ uint32_t size;
+ int ret = -ENOTSUP;
+
+ if (res->port_id >= nb_ports) {
+ printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+ return;
+ }
+
+ if (!all_ports_stopped()) {
+ printf("Please stop all ports first\n");
+ return;
+ }
+
+ buff = open_ddp_package_file(res->filepath, &size);
+ if (!buff)
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_process_ddp_package(res->port_id,
+ buff, size,
+ RTE_PMD_I40E_PKG_OP_WR_DEL);
+#endif
+
+ if (ret == -EACCES)
+ printf("Profile does not exist.\n");
+ else if (ret < 0)
+ printf("Failed to delete profile.\n");
+
+ close_ddp_package_file(buff);
+}
+
+cmdline_parse_inst_t cmd_ddp_del = {
+ .f = cmd_ddp_del_parsed,
+ .data = NULL,
+ .help_str = "ddp del <port_id> <profile_path>",
+ .tokens = {
+ (void *)&cmd_ddp_del_ddp,
+ (void *)&cmd_ddp_del_del,
+ (void *)&cmd_ddp_del_port_id,
+ (void *)&cmd_ddp_del_filepath,
+ NULL,
+ },
+};
+
+/* Get dynamic device personalization profile info */
+struct cmd_ddp_info_result {
+ cmdline_fixed_string_t ddp;
+ cmdline_fixed_string_t get;
+ cmdline_fixed_string_t info;
+ char filepath[];
+};
+
+cmdline_parse_token_string_t cmd_ddp_info_ddp =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, ddp, "ddp");
+cmdline_parse_token_string_t cmd_ddp_info_get =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, get, "get");
+cmdline_parse_token_string_t cmd_ddp_info_info =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, info, "info");
+cmdline_parse_token_string_t cmd_ddp_info_filepath =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, filepath, NULL);
+
+static void
+cmd_ddp_info_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ddp_info_result *res = parsed_result;
+ uint8_t *pkg;
+ uint32_t pkg_size;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ uint32_t i;
+ uint8_t *buff;
+ uint32_t buff_size;
+ struct rte_pmd_i40e_profile_info info;
+ uint32_t dev_num;
+ struct rte_pmd_i40e_ddp_device_id *devs;
+#endif
+
+ pkg = open_ddp_package_file(res->filepath, &pkg_size);
+ if (!pkg)
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&info, sizeof(info),
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER);
+ if (!ret) {
+ printf("Global Track id: 0x%x\n", info.track_id);
+ printf("Global Version: %d.%d.%d.%d\n",
+ info.version.major,
+ info.version.minor,
+ info.version.update,
+ info.version.draft);
+ printf("Global Package name: %s\n\n", info.name);
+ }
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&info, sizeof(info),
+ RTE_PMD_I40E_PKG_INFO_HEADER);
+ if (!ret) {
+ printf("i40e Profile Track id: 0x%x\n", info.track_id);
+ printf("i40e Profile Version: %d.%d.%d.%d\n",
+ info.version.major,
+ info.version.minor,
+ info.version.update,
+ info.version.draft);
+ printf("i40e Profile name: %s\n\n", info.name);
+ }
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&buff_size, sizeof(buff_size),
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE);
+ if (!ret && buff_size) {
+ buff = (uint8_t *)malloc(buff_size);
+ if (buff) {
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ buff, buff_size,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES);
+ if (!ret)
+ printf("Package Notes:\n%s\n\n", buff);
+ free(buff);
+ }
+ }
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&dev_num, sizeof(dev_num),
+ RTE_PMD_I40E_PKG_INFO_DEVID_NUM);
+ if (!ret && dev_num) {
+ devs = (struct rte_pmd_i40e_ddp_device_id *)malloc(dev_num *
+ sizeof(struct rte_pmd_i40e_ddp_device_id));
+ if (devs) {
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)devs, dev_num *
+ sizeof(struct rte_pmd_i40e_ddp_device_id),
+ RTE_PMD_I40E_PKG_INFO_DEVID_LIST);
+ if (!ret) {
+ printf("List of supported devices:\n");
+ for (i = 0; i < dev_num; i++) {
+ printf(" %04X:%04X %04X:%04X\n",
+ devs[i].vendor_dev_id >> 16,
+ devs[i].vendor_dev_id & 0xFFFF,
+ devs[i].sub_vendor_dev_id >> 16,
+ devs[i].sub_vendor_dev_id & 0xFFFF);
+ }
+ printf("\n");
+ }
+ free(devs);
+ }
+ }
+ ret = 0;
+#endif
+ if (ret == -ENOTSUP)
+ printf("Function not supported in PMD driver\n");
+ close_ddp_package_file(pkg);
+}
+
+cmdline_parse_inst_t cmd_ddp_get_info = {
+ .f = cmd_ddp_info_parsed,
+ .data = NULL,
+ .help_str = "ddp get info <profile_path>",
+ .tokens = {
+ (void *)&cmd_ddp_info_ddp,
+ (void *)&cmd_ddp_info_get,
+ (void *)&cmd_ddp_info_info,
+ (void *)&cmd_ddp_info_filepath,
+ NULL,
+ },
+};
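
The three DDP profile commands wired up in this hunk can be exercised as below (file paths are illustrative; per the checks above, "ddp add" and "ddp del" require all ports to be stopped, and "ddp add" can optionally write a backup copy via the second, comma-separated path):

	testpmd> port stop all
	testpmd> ddp add 0 ./profile.pkgo,./backup.pkgo
	testpmd> ddp get info ./profile.pkgo
	testpmd> ddp del 0 ./backup.pkgo
	testpmd> port start all
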
+
/* Get dynamic device personalization profile info list*/
#define PROFILE_INFO_SIZE 48
#define MAX_PROFILE_NUM 16
@@ -13042,9 +13649,16 @@ cmd_show_vf_stats_parsed(
memset(&stats, 0, sizeof(stats));
#ifdef RTE_LIBRTE_I40E_PMD
- ret = rte_pmd_i40e_get_vf_stats(res->port_id,
- res->vf_id,
- &stats);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_get_vf_stats(res->port_id,
+ res->vf_id,
+ &stats);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_get_vf_stats(res->port_id,
+ res->vf_id,
+ &stats);
#endif
switch (ret) {
@@ -13140,8 +13754,14 @@ cmd_clear_vf_stats_parsed(
return;
#ifdef RTE_LIBRTE_I40E_PMD
- ret = rte_pmd_i40e_reset_vf_stats(res->port_id,
- res->vf_id);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_reset_vf_stats(res->port_id,
+ res->vf_id);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_reset_vf_stats(res->port_id,
+ res->vf_id);
#endif
switch (ret) {
@@ -13597,12 +14217,10 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all,
(cmdline_parse_inst_t *)&cmd_set_flush_rx,
(cmdline_parse_inst_t *)&cmd_set_link_check,
-#ifdef RTE_NIC_BYPASS
(cmdline_parse_inst_t *)&cmd_set_bypass_mode,
(cmdline_parse_inst_t *)&cmd_set_bypass_event,
(cmdline_parse_inst_t *)&cmd_set_bypass_timeout,
(cmdline_parse_inst_t *)&cmd_show_bypass_config,
-#endif
#ifdef RTE_LIBRTE_PMD_BOND
(cmdline_parse_inst_t *) &cmd_set_bonding_mode,
(cmdline_parse_inst_t *) &cmd_show_bonding_config,
@@ -13613,6 +14231,8 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *) &cmd_set_bond_mac_addr,
(cmdline_parse_inst_t *) &cmd_set_balance_xmit_policy,
(cmdline_parse_inst_t *) &cmd_set_bond_mon_period,
+ (cmdline_parse_inst_t *) &cmd_set_lacp_dedicated_queues,
+ (cmdline_parse_inst_t *) &cmd_set_bonding_agg_mode_policy,
#endif
(cmdline_parse_inst_t *)&cmd_vlan_offload,
(cmdline_parse_inst_t *)&cmd_vlan_tpid,
@@ -13629,6 +14249,8 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_tso_show,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_set,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_show,
+ (cmdline_parse_inst_t *)&cmd_enable_gro,
+ (cmdline_parse_inst_t *)&cmd_gro_set,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx,
@@ -13724,17 +14346,15 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_vf_vlan_stripq,
(cmdline_parse_inst_t *)&cmd_set_vf_vlan_insert,
(cmdline_parse_inst_t *)&cmd_set_tx_loopback,
-#ifdef RTE_LIBRTE_IXGBE_PMD
(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
- (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
(cmdline_parse_inst_t *)&cmd_set_vf_traffic,
+ (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
(cmdline_parse_inst_t *)&cmd_vf_rate_limit,
-#endif
(cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
(cmdline_parse_inst_t *)&cmd_set_vf_promisc,
@@ -13747,7 +14367,9 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_strict_link_prio,
(cmdline_parse_inst_t *)&cmd_tc_min_bw,
(cmdline_parse_inst_t *)&cmd_ddp_add,
+ (cmdline_parse_inst_t *)&cmd_ddp_del,
(cmdline_parse_inst_t *)&cmd_ddp_get_list,
+ (cmdline_parse_inst_t *)&cmd_ddp_get_info,
(cmdline_parse_inst_t *)&cmd_show_vf_stats,
(cmdline_parse_inst_t *)&cmd_clear_vf_stats,
(cmdline_parse_inst_t *)&cmd_ptype_mapping_get,
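
Editor's note: the VF-stats hunks above replace a hard i40e dependency with
a PMD-agnostic fallback chain: ret (initialized to -ENOTSUP earlier in each
function) is offered to every compiled-in PMD helper in turn, and only the
first one that recognizes the port clears it. A condensed sketch, where
port_id/vf_id/stats stand in for the parsed command fields:

    int ret = -ENOTSUP;

    #ifdef RTE_LIBRTE_I40E_PMD
        if (ret == -ENOTSUP)
            ret = rte_pmd_i40e_get_vf_stats(port_id, vf_id, &stats);
    #endif
    #ifdef RTE_LIBRTE_BNXT_PMD
        if (ret == -ENOTSUP)
            ret = rte_pmd_bnxt_get_vf_stats(port_id, vf_id, &stats);
    #endif
        if (ret == -ENOTSUP)
            printf("function not implemented by this port's PMD\n");

set_vf_rate_limit() in config.c (further below) applies the same chain to
rte_pmd_ixgbe_set_vf_rate_limit() and rte_pmd_bnxt_set_vf_rate_limit().
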
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 0fd69f90..a17a0043 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -80,6 +80,7 @@ enum index {
FLUSH,
QUERY,
LIST,
+ ISOLATE,
/* Destroy arguments. */
DESTROY_RULE,
@@ -152,6 +153,7 @@ enum index {
ITEM_TCP,
ITEM_TCP_SRC,
ITEM_TCP_DST,
+ ITEM_TCP_FLAGS,
ITEM_SCTP,
ITEM_SCTP_SRC,
ITEM_SCTP_DST,
@@ -167,6 +169,8 @@ enum index {
ITEM_MPLS_LABEL,
ITEM_GRE,
ITEM_GRE_PROTO,
+ ITEM_FUZZY,
+ ITEM_FUZZY_THRESH,
/* Validate/create actions. */
ACTIONS,
@@ -220,7 +224,6 @@ struct context {
enum index prev; /**< Index of the last token seen. */
int next_num; /**< Number of entries in next[]. */
int args_num; /**< Number of entries in args[]. */
- uint32_t reparse:1; /**< Start over from the beginning. */
uint32_t eol:1; /**< EOL has been detected. */
uint32_t last:1; /**< No more arguments. */
uint16_t port; /**< Current port ID (for completions). */
@@ -365,6 +368,9 @@ struct buffer {
uint32_t *group;
uint32_t group_n;
} list; /**< List arguments. */
+ struct {
+ int set;
+ } isolate; /**< Isolated mode arguments. */
} args; /**< Command arguments. */
};
@@ -444,6 +450,13 @@ static const enum index next_item[] = {
ITEM_NVGRE,
ITEM_MPLS,
ITEM_GRE,
+ ITEM_FUZZY,
+ ZERO,
+};
+
+static const enum index item_fuzzy[] = {
+ ITEM_FUZZY_THRESH,
+ ITEM_NEXT,
ZERO,
};
@@ -531,6 +544,7 @@ static const enum index item_udp[] = {
static const enum index item_tcp[] = {
ITEM_TCP_SRC,
ITEM_TCP_DST,
+ ITEM_TCP_FLAGS,
ITEM_NEXT,
ZERO,
};
@@ -649,6 +663,9 @@ static int parse_action(struct context *, const struct token *,
static int parse_list(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
+static int parse_isolate(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
static int parse_int(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -795,7 +812,8 @@ static const struct token token_list[] = {
DESTROY,
FLUSH,
LIST,
- QUERY)),
+ QUERY,
+ ISOLATE)),
.call = parse_init,
},
/* Sub-level commands. */
@@ -845,6 +863,15 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct buffer, port)),
.call = parse_list,
},
+ [ISOLATE] = {
+ .name = "isolate",
+ .help = "restrict ingress traffic to the defined flow rules",
+ .next = NEXT(NEXT_ENTRY(BOOLEAN),
+ NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
+ ARGS_ENTRY(struct buffer, port)),
+ .call = parse_isolate,
+ },
/* Destroy arguments. */
[DESTROY_RULE] = {
.name = "rule",
@@ -1267,6 +1294,13 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
hdr.dst_port)),
},
+ [ITEM_TCP_FLAGS] = {
+ .name = "flags",
+ .help = "TCP flags",
+ .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
+ hdr.tcp_flags)),
+ },
[ITEM_SCTP] = {
.name = "sctp",
.help = "match SCTP header",
@@ -1372,6 +1406,22 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
protocol)),
},
+ [ITEM_FUZZY] = {
+ .name = "fuzzy",
+ .help = "fuzzy pattern match, expect faster than default",
+ .priv = PRIV_ITEM(FUZZY,
+ sizeof(struct rte_flow_item_fuzzy)),
+ .next = NEXT(item_fuzzy),
+ .call = parse_vc,
+ },
+ [ITEM_FUZZY_THRESH] = {
+ .name = "thresh",
+ .help = "match accuracy threshold",
+ .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
+ thresh)),
+ },
+
/* Validate/create actions. */
[ACTIONS] = {
.name = "actions",
@@ -1574,6 +1624,19 @@ arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
return len;
}
+/** Compare a string with a partial one of a given length. */
+static int
+strcmp_partial(const char *full, const char *partial, size_t partial_len)
+{
+ int r = strncmp(full, partial, partial_len);
+
+ if (r)
+ return r;
+ if (strlen(full) <= partial_len)
+ return 0;
+ return full[partial_len];
+}
+
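/* Editor's note on semantics: unlike plain strncmp(), strcmp_partial()
 * treats a mere prefix of a longer token as a mismatch:
 *
 *   strcmp_partial("end", "end", 3);  returns 0   (complete match)
 *   strcmp_partial("end", "en", 2);   returns 'd' (prefix only, no match)
 *   strcmp_partial("end", "enq", 3);  returns !0  (strncmp() differs)
 *
 * This is what stops partial input from matching tokens it merely starts,
 * which the strncmp() calls replaced below used to get wrong. */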
/**
* Parse a prefix length and generate a bit-mask.
*
@@ -1656,7 +1719,7 @@ parse_default(struct context *ctx, const struct token *token,
(void)ctx;
(void)buf;
(void)size;
- if (strncmp(str, token->name, len))
+ if (strcmp_partial(token->name, str, len))
return -1;
return len;
}
@@ -1899,7 +1962,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
if (ctx->curr != ACTION_RSS_QUEUE)
return -1;
i = ctx->objdata >> 16;
- if (!strncmp(str, "end", len)) {
+ if (!strcmp_partial("end", str, len)) {
ctx->objdata &= 0xffff;
return len;
}
@@ -2034,7 +2097,7 @@ parse_action(struct context *ctx, const struct token *token,
const struct parse_action_priv *priv;
token = &token_list[next_action[i]];
- if (strncmp(token->name, str, len))
+ if (strcmp_partial(token->name, str, len))
continue;
priv = token->priv;
if (!priv)
@@ -2087,6 +2150,33 @@ parse_list(struct context *ctx, const struct token *token,
return len;
}
+/** Parse tokens for isolate command. */
+static int
+parse_isolate(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != ISOLATE)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ }
+ return len;
+}
+
/**
* Parse signed/unsigned integers 8 to 64-bit long.
*
@@ -2374,7 +2464,7 @@ parse_boolean(struct context *ctx, const struct token *token,
if (!arg)
return -1;
for (i = 0; boolean_name[i]; ++i)
- if (!strncmp(str, boolean_name[i], len))
+ if (!strcmp_partial(boolean_name[i], str, len))
break;
/* Process token as integer. */
if (boolean_name[i])
@@ -2534,7 +2624,6 @@ cmd_flow_context_init(struct context *ctx)
ctx->prev = ZERO;
ctx->next_num = 0;
ctx->args_num = 0;
- ctx->reparse = 0;
ctx->eol = 0;
ctx->last = 0;
ctx->port = 0;
@@ -2555,9 +2644,6 @@ cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
int i;
(void)hdr;
- /* Restart as requested. */
- if (ctx->reparse)
- cmd_flow_context_init(ctx);
token = &token_list[ctx->curr];
/* Check argument length. */
ctx->eol = 0;
@@ -2633,8 +2719,6 @@ cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
int i;
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
/* Count number of tokens in current list. */
if (ctx->next_num)
list = ctx->next[ctx->next_num - 1];
@@ -2668,8 +2752,6 @@ cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
int i;
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
/* Count number of tokens in current list. */
if (ctx->next_num)
list = ctx->next[ctx->next_num - 1];
@@ -2704,8 +2786,6 @@ cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
const struct token *token = &token_list[ctx->prev];
(void)hdr;
- /* Tell cmd_flow_parse() that context must be reinitialized. */
- ctx->reparse = 1;
if (!size)
return -1;
/* Set token type and update global help with details. */
@@ -2731,12 +2811,12 @@ static struct cmdline_token_hdr cmd_flow_token_hdr = {
/** Populate the next dynamic token. */
static void
cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
- cmdline_parse_token_hdr_t *(*hdrs)[])
+ cmdline_parse_token_hdr_t **hdr_inst)
{
struct context *ctx = &cmd_flow_context;
/* Always reinitialize context before requesting the first token. */
- if (!(hdr - *hdrs))
+ if (!(hdr_inst - cmd_flow.tokens))
cmd_flow_context_init(ctx);
/* Return NULL when no more tokens are expected. */
if (!ctx->next_num && ctx->curr) {
@@ -2786,6 +2866,9 @@ cmd_flow_parsed(const struct buffer *in)
port_flow_list(in->port, in->args.list.group_n,
in->args.list.group);
break;
+ case ISOLATE:
+ port_flow_isolate(in->port, in->args.isolate.set);
+ break;
default:
break;
}
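
Editor's note: at the testpmd prompt, the additions above slot into the
existing "flow" grammar. Illustrative commands only (rule bodies are
arbitrary; 0x02 is the TCP SYN flag, and exact field syntax should be
checked against the flow command help):

    testpmd> flow isolate 0 true
    testpmd> flow create 0 ingress pattern eth / ipv4 / tcp flags is 0x02 / end actions queue index 1 / end
    testpmd> flow create 0 ingress pattern fuzzy thresh is 4 / end actions queue index 2 / end

The isolate verb lands in port_flow_isolate() (config.c, next file), which
reports e.g. "Ingress traffic on port 0 is now restricted to the defined
flow rules".
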
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 83a8f526..3ae3e1cd 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright 2013-2014 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,42 +31,11 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* BSD LICENSE
- *
- * Copyright 2013-2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
-#include <stdarg.h>
#include <stdint.h>
#include <inttypes.h>
@@ -97,6 +67,10 @@
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+#include <rte_pmd_bnxt.h>
+#endif
+#include <rte_gro.h>
#include "testpmd.h"
@@ -972,6 +946,7 @@ static const struct {
MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
+ MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
};
/** Compute storage space needed by item specification. */
@@ -979,8 +954,10 @@ static void
flow_item_spec_size(const struct rte_flow_item *item,
size_t *size, size_t *pad)
{
- if (!item->spec)
+ if (!item->spec) {
+ *size = 0;
goto empty;
+ }
switch (item->type) {
union {
const struct rte_flow_item_raw *raw;
@@ -992,10 +969,10 @@ flow_item_spec_size(const struct rte_flow_item *item,
spec.raw->length * sizeof(*spec.raw->pattern);
break;
default:
-empty:
- *size = 0;
+ *size = flow_item[item->type].size;
break;
}
+empty:
*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
@@ -1030,8 +1007,10 @@ static void
flow_action_conf_size(const struct rte_flow_action *action,
size_t *size, size_t *pad)
{
- if (!action->conf)
+ if (!action->conf) {
+ *size = 0;
goto empty;
+ }
switch (action->type) {
union {
const struct rte_flow_action_rss *rss;
@@ -1043,10 +1022,10 @@ flow_action_conf_size(const struct rte_flow_action *action,
conf.rss->num * sizeof(*conf.rss->queue);
break;
default:
-empty:
- *size = 0;
+ *size = flow_action[action->type].size;
break;
}
+empty:
*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
@@ -1437,6 +1416,22 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
}
}
+/** Restrict ingress traffic to the defined flow rules. */
+int
+port_flow_isolate(portid_t port_id, int set)
+{
+ struct rte_flow_error error;
+
+ /* Poisoning to make sure PMDs update it in case of error. */
+ memset(&error, 0x66, sizeof(error));
+ if (rte_flow_isolate(port_id, set, &error))
+ return port_flow_complain(&error);
+ printf("Ingress traffic on port %u is %s to the defined flow rules\n",
+ port_id,
+ set ? "now restricted" : "not restricted anymore");
+ return 0;
+}
+
/*
* RX/TX ring descriptors display functions.
*/
@@ -2424,6 +2419,41 @@ set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
tx_pkt_nb_segs = (uint8_t) nb_segs;
}
+void
+setup_gro(const char *mode, uint8_t port_id)
+{
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ printf("invalid port id %u\n", port_id);
+ return;
+ }
+ if (test_done == 0) {
+ printf("Before enable/disable GRO,"
+ " please stop forwarding first\n");
+ return;
+ }
+ if (strcmp(mode, "on") == 0) {
+ if (gro_ports[port_id].enable) {
+ printf("port %u has enabled GRO\n", port_id);
+ return;
+ }
+ gro_ports[port_id].enable = 1;
+ gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
+
+ if (gro_ports[port_id].param.max_flow_num == 0)
+ gro_ports[port_id].param.max_flow_num =
+ GRO_DEFAULT_FLOW_NUM;
+ if (gro_ports[port_id].param.max_item_per_flow == 0)
+ gro_ports[port_id].param.max_item_per_flow =
+ GRO_DEFAULT_ITEM_NUM_PER_FLOW;
+ } else {
+ if (gro_ports[port_id].enable == 0) {
+ printf("port %u has disabled GRO\n", port_id);
+ return;
+ }
+ gro_ports[port_id].enable = 0;
+ }
+}
+
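
Editor's note: setup_gro() fills the per-port rte_gro_param lazily; written
out as a static initializer (assuming <rte_gro.h> and the defaults from
testpmd.h below, where GRO_DEFAULT_FLOW_NUM is 4 and
GRO_DEFAULT_ITEM_NUM_PER_FLOW is DEF_PKT_BURST), the configuration it
produces is:

    struct rte_gro_param param = {
        .gro_types = RTE_GRO_TCP_IPV4,          /* only TCP/IPv4 reassembly */
        .max_flow_num = GRO_DEFAULT_FLOW_NUM,
        .max_item_per_flow = GRO_DEFAULT_ITEM_NUM_PER_FLOW,
    };

This is the parameter block rte_gro_reassemble_burst() consumes in the
csumonly RX path further below.
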
char*
list_pkt_forwarding_modes(void)
{
@@ -3010,10 +3040,10 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
}
-#ifdef RTE_LIBRTE_IXGBE_PMD
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
+#ifdef RTE_LIBRTE_IXGBE_PMD
int diag;
if (is_rx)
@@ -3023,15 +3053,15 @@ set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
if (diag == 0)
return;
- if(is_rx)
- printf("rte_pmd_ixgbe_set_vf_rx for port_id=%d failed "
- "diag=%d\n", port_id, diag);
- else
- printf("rte_pmd_ixgbe_set_vf_tx for port_id=%d failed "
- "diag=%d\n", port_id, diag);
-
-}
+ printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
+ is_rx ? "rx" : "tx", port_id, diag);
+ return;
#endif
+ printf("VF %s setting not supported for port %d\n",
+ is_rx ? "Rx" : "Tx", port_id);
+ RTE_SET_USED(vf);
+ RTE_SET_USED(on);
+}
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
@@ -3055,20 +3085,27 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
return diag;
}
-#ifdef RTE_LIBRTE_IXGBE_PMD
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
- int diag;
+ int diag = -ENOTSUP;
- diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (diag == -ENOTSUP)
+ diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
+ q_msk);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (diag == -ENOTSUP)
+ diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
+#endif
if (diag == 0)
return diag;
- printf("rte_pmd_ixgbe_set_vf_rate_limit for port_id=%d failed diag=%d\n",
+
+ printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
port_id, diag);
return diag;
}
-#endif
/*
* Functions to manage the set of filtered Multicast MAC addresses.
@@ -3312,6 +3349,27 @@ open_ddp_package_file(const char *file_path, uint32_t *size)
}
int
+save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size)
+{
+ FILE *fh = fopen(file_path, "wb");
+
+ if (fh == NULL) {
+ printf("%s: Failed to open %s\n", __func__, file_path);
+ return -1;
+ }
+
+ if (fwrite(buf, 1, size, fh) != size) {
+ fclose(fh);
+ printf("%s: File write operation failed\n", __func__);
+ return -1;
+ }
+
+ fclose(fh);
+
+ return 0;
+}
+
+int
close_ddp_package_file(uint8_t *buf)
{
if (buf) {
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 66fc9a00..90c81198 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -56,10 +56,8 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
@@ -71,6 +69,7 @@
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_flow.h>
+#include <rte_gro.h>
#include "testpmd.h"
#define IP_DEFTTL 64 /* from RFC 1340. */
@@ -658,6 +657,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
if (unlikely(nb_rx == 0))
return;
+ if (unlikely(gro_ports[fs->rx_port].enable))
+ nb_rx = rte_gro_reassemble_burst(pkts_burst,
+ nb_rx,
+ &(gro_ports[fs->rx_port].param));
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 13b4f900..54e56f60 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -57,7 +57,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 15cb4a20..9b54f654 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -55,7 +55,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index cf7eab12..06dbc73a 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 3a093512..19cda0ea 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index fbe6284c..2f7f70fd 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -41,7 +41,6 @@
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
-#include <errno.h>
#include <sys/queue.h>
#include <sys/stat.h>
@@ -89,6 +88,7 @@ usage(char* progname)
"[--cmdline-file=FILENAME] "
#endif
"[--help|-h] | [--auto-start|-a] | ["
+ "--tx-first | --stats-period=PERIOD | "
"--coremask=COREMASK --portmask=PORTMASK --numa "
"--mbuf-size= | --total-num-mbufs= | "
"--nb-cores= | --nb-ports= | "
@@ -109,6 +109,10 @@ usage(char* progname)
printf(" --auto-start: start forwarding on init "
"[always when non-interactive].\n");
printf(" --help: display this message and quit.\n");
+ printf(" --tx-first: start forwarding sending a burst first "
+ "(only if interactive is disabled).\n");
+ printf(" --stats-period=PERIOD: statistics will be shown "
+ "every PERIOD seconds (only if interactive is disabled).\n");
printf(" --nb-cores=N: set the number of forwarding cores "
"(1 <= N <= %d).\n", nb_lcores);
printf(" --nb-ports=N: set the number of forwarding ports "
@@ -207,9 +211,11 @@ usage(char* progname)
printf(" --bitrate-stats=N: set the logical core N to perform "
"bit-rate calculation.\n");
printf(" --print-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>: "
- "enable print of designated event or all of them.");
+ "enable print of designated event or all of them.\n");
printf(" --mask-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>: "
- "disable print of designated event or all of them.");
+ "disable print of designated event or all of them.\n");
+ printf(" --flow-isolate-all: "
+ "requests flow API isolated mode on all ports at initialization time.\n");
}
#ifdef RTE_LIBRTE_CMDLINE
@@ -566,6 +572,8 @@ launch_args_parse(int argc, char** argv)
{ "eth-peers-configfile", 1, 0, 0 },
{ "eth-peer", 1, 0, 0 },
#endif
+ { "tx-first", 0, 0, 0 },
+ { "stats-period", 1, 0, 0 },
{ "ports", 1, 0, 0 },
{ "nb-cores", 1, 0, 0 },
{ "nb-ports", 1, 0, 0 },
@@ -623,6 +631,7 @@ launch_args_parse(int argc, char** argv)
{ "tx-queue-stats-mapping", 1, 0, 0 },
{ "rx-queue-stats-mapping", 1, 0, 0 },
{ "no-flush-rx", 0, 0, 0 },
+ { "flow-isolate-all", 0, 0, 0 },
{ "txpkts", 1, 0, 0 },
{ "disable-link-check", 0, 0, 0 },
{ "no-lsc-interrupt", 0, 0, 0 },
@@ -674,6 +683,23 @@ launch_args_parse(int argc, char** argv)
printf("Auto-start selected\n");
auto_start = 1;
}
+ if (!strcmp(lgopts[opt_idx].name, "tx-first")) {
+ printf("Ports to start sending a burst of "
+ "packets first\n");
+ tx_first = 1;
+ }
+ if (!strcmp(lgopts[opt_idx].name, "stats-period")) {
+ char *end = NULL;
+ unsigned int n;
+
+ n = strtoul(optarg, &end, 10);
+ if ((optarg[0] == '\0') || (end == NULL) ||
+ (*end != '\0'))
+ break;
+
+ stats_period = n;
+ break;
+ }
if (!strcmp(lgopts[opt_idx].name,
"eth-peers-configfile")) {
if (init_peer_eth_addrs(optarg) != 0)
@@ -1081,6 +1107,8 @@ launch_args_parse(int argc, char** argv)
lsc_interrupt = 0;
if (!strcmp(lgopts[opt_idx].name, "no-rmv-interrupt"))
rmv_interrupt = 0;
+ if (!strcmp(lgopts[opt_idx].name, "flow-isolate-all"))
+ flow_isolate_all = 1;
if (!strcmp(lgopts[opt_idx].name, "print-event"))
if (parse_event_printing_config(optarg, 1)) {
rte_exit(EXIT_FAILURE,
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index dcd1d85c..5ef02190 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index d1041afa..7d401394 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -73,6 +73,9 @@
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
@@ -87,6 +90,7 @@
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
+#include <rte_gro.h>
#include "testpmd.h"
@@ -95,6 +99,7 @@ uint16_t verbose_level = 0; /**< Silent by default. */
/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
+uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};
/*
@@ -177,7 +182,7 @@ uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
-
+uint16_t stats_period; /**< Period to show statistics (disabled by default) */
/*
* Configuration of packet segments used by the "txonly" processing engine.
*/
@@ -267,6 +272,11 @@ uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
uint8_t no_flush_rx = 0; /* flush by default */
/*
+ * Flow API isolated mode.
+ */
+uint8_t flow_isolate_all;
+
+/*
* Avoids to check link status when starting/stopping a port.
*/
uint8_t no_link_check = 0; /* check by default */
@@ -295,13 +305,13 @@ uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
/*
* NIC bypass mode configuration options.
*/
-#ifdef RTE_NIC_BYPASS
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
-uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
-
+uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif
+
#ifdef RTE_LIBRTE_LATENCY_STATS
/*
@@ -375,12 +385,14 @@ lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
+struct gro_status gro_ports[RTE_MAX_ETHPORTS];
+
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
-static void eth_event_callback(uint8_t port_id,
- enum rte_eth_event_type type,
- void *param);
+static int eth_event_callback(uint8_t port_id,
+ enum rte_eth_event_type type,
+ void *param, void *ret_param);
/*
* Check if all the ports are started.
@@ -389,7 +401,7 @@ static void eth_event_callback(uint8_t port_id,
static int all_ports_started(void);
/*
- * Helper function to check if socket is allready discovered.
+ * Helper function to check if socket is already discovered.
* If yes, return positive value. If not, return zero.
*/
int
@@ -1422,6 +1434,15 @@ start_port(portid_t pid)
if (port->need_reconfig > 0) {
port->need_reconfig = 0;
+ if (flow_isolate_all) {
+ int ret = port_flow_isolate(pi, 1);
+ if (ret) {
+ printf("Failed to apply isolated"
+ " mode on port %d\n", pi);
+ return -1;
+ }
+ }
+
printf("Configuring Port %d (socket %u)\n", pi,
port->socket_id);
/* configure port */
@@ -1707,8 +1728,10 @@ detach_port(uint8_t port_id)
if (ports[port_id].flow_list)
port_flow_flush(port_id);
- if (rte_eth_dev_detach(port_id, name))
+ if (rte_eth_dev_detach(port_id, name)) {
+ RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
return;
+ }
nb_ports = rte_eth_dev_count();
@@ -1806,28 +1829,23 @@ static void
rmv_event_callback(void *arg)
{
struct rte_eth_dev *dev;
- struct rte_devargs *da;
- char name[32] = "";
uint8_t port_id = (intptr_t)arg;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- da = dev->device->devargs;
stop_port(port_id);
close_port(port_id);
- if (da->type == RTE_DEVTYPE_VIRTUAL)
- snprintf(name, sizeof(name), "%s", da->virt.drv_name);
- else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
- rte_pci_device_name(&da->pci.addr, name, sizeof(name));
- printf("removing device %s\n", name);
- rte_eal_dev_detach(name);
- dev->state = RTE_ETH_DEV_UNUSED;
+ printf("removing device %s\n", dev->device->name);
+ if (rte_eal_dev_detach(dev->device))
+ RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
+ dev->device->name);
}
/* This function is used by the interrupt thread */
-static void
-eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
+static int
+eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+ void *ret_param)
{
static const char * const event_desc[] = {
[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
@@ -1841,6 +1859,7 @@ eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
};
RTE_SET_USED(param);
+ RTE_SET_USED(ret_param);
if (type >= RTE_ETH_EVENT_MAX) {
fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
@@ -1861,6 +1880,7 @@ eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
default:
break;
}
+ return 0;
}
static int
@@ -2012,8 +2032,8 @@ init_port_config(void)
rte_eth_macaddr_get(pid, &port->eth_addr);
map_port_queue_stats_mapping_registers(pid, port);
-#ifdef RTE_NIC_BYPASS
- rte_eth_dev_bypass_init(pid);
+#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+ rte_pmd_ixgbe_bypass_init(pid);
#endif
if (lsc_interrupt &&
@@ -2229,6 +2249,21 @@ force_quit(void)
}
static void
+print_stats(void)
+{
+ uint8_t i;
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
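/* Editor's note: 27 is ESC, so clr is the ANSI/VT100 sequence "ESC[2J"
 * (erase display) and top_left is "ESC[1;1H" (cursor to row 1, column 1). */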
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, top_left);
+
+ printf("\nPort statistics ====================================");
+ for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
+ nic_stats_display(fwd_ports_ids[i]);
+}
+
+static void
signal_handler(int signum)
{
if (signum == SIGINT || signum == SIGTERM) {
@@ -2291,6 +2326,16 @@ main(int argc, char** argv)
if (argc > 1)
launch_args_parse(argc, argv);
+ if (tx_first && interactive)
+ rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
+ "interactive mode.\n");
+
+ if (tx_first && lsc_interrupt) {
+ printf("Warning: lsc_interrupt needs to be off when "
+ " using tx_first. Disabling.\n");
+ lsc_interrupt = 0;
+ }
+
if (!nb_rxq && !nb_txq)
printf("Warning: Either rx or tx queues should be non-zero\n");
@@ -2350,7 +2395,29 @@ main(int argc, char** argv)
int rc;
printf("No commandline core given, start packet forwarding\n");
- start_packet_forwarding(0);
+ start_packet_forwarding(tx_first);
+ if (stats_period != 0) {
+ uint64_t prev_time = 0, cur_time, diff_time = 0;
+ uint64_t timer_period;
+
+ /* Convert to number of cycles */
+ timer_period = stats_period * rte_get_timer_hz();
+
+ while (1) {
+ cur_time = rte_get_timer_cycles();
+ diff_time += cur_time - prev_time;
+
+ if (diff_time >= timer_period) {
+ print_stats();
+ /* Reset the timer */
+ diff_time = 0;
+ }
+ /* Sleep to avoid unnecessary checks */
+ prev_time = cur_time;
+ sleep(1);
+ }
+ }
+
printf("Press enter to exit\n");
rc = read(0, &c, 1);
pmd_test_exit();
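
Editor's note: putting the new options together, a plausible non-interactive
invocation (EAL core/memory arguments are illustrative) that sends an
initial burst and redraws port statistics every 5 seconds:

    ./testpmd -l 0-3 -n 4 -- --tx-first --stats-period=5

--tx-first is rejected outright in interactive mode, and the stats loop
above only runs on the non-interactive path.
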
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index e6c43ba0..c9d7739b 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -34,6 +34,9 @@
#ifndef _TESTPMD_H_
#define _TESTPMD_H_
+#include <rte_pci.h>
+#include <rte_gro.h>
+
#define RTE_PORT_ALL (~(portid_t)0x0)
#define RTE_TEST_RX_DESC_MAX 2048
@@ -299,10 +302,12 @@ extern uint16_t nb_rx_queue_stats_mappings;
extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
extern uint8_t interactive;
extern uint8_t auto_start;
+extern uint8_t tx_first;
extern char cmdline_filename[PATH_MAX]; /**< offline commands file */
extern uint8_t numa_support; /**< set by "--numa" parameter */
extern uint16_t port_topology; /**< set by "--port-topology" parameter */
extern uint8_t no_flush_rx; /**<set by "--no-flush-rx" parameter */
+extern uint8_t flow_isolate_all; /**< set by "--flow-isolate-all" parameter */
extern uint8_t mp_anon; /**< set by "--mp-anon" parameter */
extern uint8_t no_link_check; /**<set by "--disable-link-check" parameter */
extern volatile int test_done; /* stop packet forwarding when set to 1. */
@@ -311,7 +316,7 @@ extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */
extern uint32_t event_print_mask;
/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
#endif
@@ -378,6 +383,7 @@ extern enum dcb_queue_mapping_mode dcb_q_mapping;
extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
extern uint32_t param_total_num_mbufs;
+extern uint16_t stats_period;
#ifdef RTE_LIBRTE_LATENCY_STATS
extern uint8_t latencystats_enabled;
@@ -428,6 +434,14 @@ extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */
+#define GRO_DEFAULT_FLOW_NUM 4
+#define GRO_DEFAULT_ITEM_NUM_PER_FLOW DEF_PKT_BURST
+struct gro_status {
+ struct rte_gro_param param;
+ uint8_t enable;
+};
+extern struct gro_status gro_ports[RTE_MAX_ETHPORTS];
+
static inline unsigned int
lcore_num(void)
{
@@ -543,6 +557,7 @@ int port_flow_flush(portid_t port_id);
int port_flow_query(portid_t port_id, uint32_t rule,
enum rte_flow_action_type action);
void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group);
+int port_flow_isolate(portid_t port_id, int set);
void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id);
void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id);
@@ -625,6 +640,7 @@ void get_2tuple_filter(uint8_t port_id, uint16_t index);
void get_5tuple_filter(uint8_t port_id, uint16_t index);
int rx_queue_id_is_invalid(queueid_t rxq_id);
int tx_queue_id_is_invalid(queueid_t txq_id);
+void setup_gro(const char *mode, uint8_t port_id);
/* Functions to manage the set of filtered Multicast MAC addresses */
void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr);
@@ -632,6 +648,7 @@ void mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr);
void port_dcb_info_display(uint8_t port_id);
uint8_t *open_ddp_package_file(const char *file_path, uint32_t *size);
+int save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size);
int close_ddp_package_file(uint8_t *buf);
enum print_warning {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 8b1a2afc..7070ddc3 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -56,10 +56,8 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>