author    Luca Boccassi <luca.boccassi@gmail.com>  2018-02-19 11:16:57 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-02-19 11:17:28 +0000
commit    ca33590b6af032bff57d9cc70455660466a654b2 (patch)
tree      0b68b090bd9b4a78a3614b62400b29279d76d553 /app/test-crypto-perf/cperf_ops.c
parent    169a9de21e263aa6599cdc2d87a45ae158d9f509 (diff)

New upstream version 18.02 (upstream/18.02)

Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'app/test-crypto-perf/cperf_ops.c')
-rw-r--r--  app/test-crypto-perf/cperf_ops.c | 109
1 file changed, 58 insertions(+), 51 deletions(-)
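
The recurring change in every cperf_set_ops_* helper below is the same: instead of always assigning the fixed options->test_buffer_size, the op length is taken from options->imix_buffer_sizes at the shared *imix_idx position, and the index is advanced modulo options->pool_sz so the pre-expanded IMIX size pattern repeats across the operation pool. The following is a minimal, self-contained sketch of that selection logic only; struct fake_options and next_data_length are hypothetical stand-ins for illustration and are not part of the cperf code.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Hypothetical stand-in for the cperf options structure; only the
 * fields relevant to the IMIX length selection are modelled here. */
struct fake_options {
	uint32_t imix_distribution_count;  /* 0 => IMIX disabled, fixed size */
	uint32_t test_buffer_size;         /* used when IMIX is disabled */
	uint32_t pool_sz;                  /* number of pre-expanded IMIX entries */
	uint32_t imix_buffer_sizes[4];     /* one payload size per pool element */
};

/* Pick the payload length for the next crypto op, advancing the
 * caller-owned index the same way the patched helpers do. */
static uint32_t
next_data_length(const struct fake_options *opts, uint32_t *imix_idx)
{
	uint32_t len;

	if (opts->imix_distribution_count) {
		len = opts->imix_buffer_sizes[*imix_idx];
		*imix_idx = (*imix_idx + 1) % opts->pool_sz;
	} else {
		len = opts->test_buffer_size;
	}
	return len;
}

int
main(void)
{
	struct fake_options opts = {
		.imix_distribution_count = 2,
		.test_buffer_size = 1024,
		.pool_sz = 4,
		.imix_buffer_sizes = { 64, 64, 64, 1518 },
	};
	uint32_t idx = 0;

	for (int i = 0; i < 6; i++)
		printf("op %d: %" PRIu32 " bytes\n",
		       i, next_data_length(&opts, &idx));

	return 0;
}

Note that for the bit-oriented wireless algorithms (SNOW3G, KASUMI, ZUC) the patch then shifts the selected length left by 3, so the cipher/auth data length is expressed in bits rather than bytes, as the hunks below show.
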
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 23d30ca3..8f320099 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_cryptodev.h>
@@ -41,7 +13,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
- uint16_t iv_offset __rte_unused)
+ uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
{
uint16_t i;
@@ -62,7 +34,12 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
dst_buf_offset);
/* cipher parameters */
- sym_op->cipher.data.length = options->test_buffer_size;
+ if (options->imix_distribution_count) {
+ sym_op->cipher.data.length =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ sym_op->cipher.data.length = options->test_buffer_size;
sym_op->cipher.data.offset = 0;
}
@@ -75,7 +52,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
- uint16_t iv_offset __rte_unused)
+ uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
{
uint16_t i;
@@ -96,7 +73,12 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
dst_buf_offset);
/* auth parameters */
- sym_op->auth.data.length = options->test_buffer_size;
+ if (options->imix_distribution_count) {
+ sym_op->auth.data.length =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ sym_op->auth.data.length = options->test_buffer_size;
sym_op->auth.data.offset = 0;
}
@@ -109,7 +91,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
- uint16_t iv_offset)
+ uint16_t iv_offset, uint32_t *imix_idx)
{
uint16_t i;
@@ -130,12 +112,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
dst_buf_offset);
/* cipher parameters */
+ if (options->imix_distribution_count) {
+ sym_op->cipher.data.length =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ sym_op->cipher.data.length = options->test_buffer_size;
+
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
- sym_op->cipher.data.length = options->test_buffer_size << 3;
- else
- sym_op->cipher.data.length = options->test_buffer_size;
+ sym_op->cipher.data.length <<= 3;
sym_op->cipher.data.offset = 0;
}
@@ -160,7 +147,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
- uint16_t iv_offset)
+ uint16_t iv_offset, uint32_t *imix_idx)
{
uint16_t i;
@@ -225,12 +212,17 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
}
+ if (options->imix_distribution_count) {
+ sym_op->auth.data.length =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ sym_op->auth.data.length = options->test_buffer_size;
+
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
- sym_op->auth.data.length = options->test_buffer_size << 3;
- else
- sym_op->auth.data.length = options->test_buffer_size;
+ sym_op->auth.data.length <<= 3;
sym_op->auth.data.offset = 0;
}
@@ -255,7 +247,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
- uint16_t iv_offset)
+ uint16_t iv_offset, uint32_t *imix_idx)
{
uint16_t i;
@@ -276,12 +268,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
dst_buf_offset);
/* cipher parameters */
+ if (options->imix_distribution_count) {
+ sym_op->cipher.data.length =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ sym_op->cipher.data.length = options->test_buffer_size;
+
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
- sym_op->cipher.data.length = options->test_buffer_size << 3;
- else
- sym_op->cipher.data.length = options->test_buffer_size;
+ sym_op->cipher.data.length <<= 3;
sym_op->cipher.data.offset = 0;
@@ -321,12 +318,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
rte_pktmbuf_iova_offset(buf, offset);
}
+ if (options->imix_distribution_count) {
+ sym_op->auth.data.length =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ sym_op->auth.data.length = options->test_buffer_size;
+
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
- sym_op->auth.data.length = options->test_buffer_size << 3;
- else
- sym_op->auth.data.length = options->test_buffer_size;
+ sym_op->auth.data.length <<= 3;
sym_op->auth.data.offset = 0;
}
@@ -360,7 +362,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
- uint16_t iv_offset)
+ uint16_t iv_offset, uint32_t *imix_idx)
{
uint16_t i;
/* AAD is placed after the IV */
@@ -384,7 +386,12 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
dst_buf_offset);
/* AEAD parameters */
- sym_op->aead.data.length = options->test_buffer_size;
+ if (options->imix_distribution_count) {
+ sym_op->aead.data.length =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ sym_op->aead.data.length = options->test_buffer_size;
sym_op->aead.data.offset = 0;
sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],