author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 09:22:35 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 16:15:13 +0200
commit     809f08006d56e7ba4ce190b0a63d44acf62d8044 (patch)
tree       d93fbe3244ee0cff16a6af830c7efb15c26e5ef4 /drivers/crypto
parent     b8ce7c38b99df118002fb460e680fabf16944f6c (diff)
Imported Upstream version 16.07-rc1
Change-Id: If3f757dc95532706b04053286c6b54492169f1a3
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Makefile                              1
-rw-r--r--  drivers/crypto/aesni_gcm/Makefile                    7
-rw-r--r--  drivers/crypto/aesni_mb/Makefile                     5
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c           9
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c      20
-rw-r--r--  drivers/crypto/kasumi/Makefile                      69
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd.c             657
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_ops.c         344
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_private.h     106
-rw-r--r--  drivers/crypto/kasumi/rte_pmd_kasumi_version.map     3
-rw-r--r--  drivers/crypto/null/Makefile                         2
-rw-r--r--  drivers/crypto/qat/Makefile                          2
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c     7
-rw-r--r--  drivers/crypto/qat/qat_crypto.c                     45
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c               5
-rw-r--r--  drivers/crypto/snow3g/Makefile                      17
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c             215
17 files changed, 1431 insertions(+), 83 deletions(-)
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index b4205385..dc4ef7f9 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -35,6 +35,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index aa2621bd..5898cae1 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -30,9 +30,11 @@
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
+ifneq ($(MAKECMDGOALS),clean)
 ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
 $(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
 endif
+endif
 
 # library name
 LIB = librte_pmd_aesni_gcm.a
@@ -47,9 +49,10 @@ LIBABIVER := 1
 # versioning export map
 EXPORT_MAP := rte_pmd_aesni_gcm_version.map
 
-# external library include paths
+# external library dependencies
 CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
 CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
 LDLIBS += -lcrypto
 
 # library source files
@@ -62,6 +65,8 @@ SYMLINK-y-include +=
 # library dependencies
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_ring
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index ec65291f..d3994cc6 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -49,9 +49,10 @@ LIBABIVER := 1
 # versioning export map
 EXPORT_MAP := rte_pmd_aesni_version.map
 
-# external library include paths
+# external library dependencies
 CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
 CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
@@ -60,6 +61,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
 # library dependencies
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_eal
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_ring
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_cryptodev
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 3415ac1b..6554fc4e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -222,6 +222,9 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
 	case RTE_CRYPTO_CIPHER_AES_CBC:
 		sess->cipher.mode = CBC;
 		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.mode = CNTR;
+		break;
 	default:
 		MB_LOG_ERR("Unsupported cipher mode parameter");
 		return -1;
@@ -379,9 +382,11 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
 		/* append space for output data to mbuf */
 		char *odata = rte_pktmbuf_append(m_dst,
 				rte_pktmbuf_data_len(op->sym->m_src));
-		if (odata == NULL)
+		if (odata == NULL) {
 			MB_LOG_ERR("failed to allocate space in destination "
 					"mbuf for source data");
+			return NULL;
+		}
 
 		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
 				rte_pktmbuf_data_len(op->sym->m_src));
@@ -560,7 +565,7 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			goto flush_jobs;
 		else
 			qp->stats.enqueued_count += processed_jobs;
-	return i;
+	return i;
 
 flush_jobs:
 	/*
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3806a66e..d3c46ace 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -207,6 +207,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				}, }
 			}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
diff --git a/drivers/crypto/kasumi/Makefile b/drivers/crypto/kasumi/Makefile
new file mode 100644
index 00000000..9fb0be85
--- /dev/null
+++ b/drivers/crypto/kasumi/Makefile
@@ -0,0 +1,69 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_KASUMI_PATH),)
+$(error "Please define LIBSSO_KASUMI_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_kasumi.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_kasumi_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
+LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
new file mode 100644
index 00000000..5f8c7a2e
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -0,0 +1,657 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+#define KASUMI_KEY_LENGTH 16
+#define KASUMI_IV_LENGTH 8
+#define KASUMI_DIGEST_LENGTH 4
+#define KASUMI_MAX_BURST 4
+#define BYTE_LEN 8
+
+/**
+ * Global static parameter used to create a unique name for each KASUMI
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_KASUMI_PMD,
+			unique_name_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/** Get xform chain order. */
+static enum kasumi_operation
+kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+	if (xform == NULL)
+		return KASUMI_OP_NOT_SUPPORTED;
+
+	if (xform->next)
+		if (xform->next->next != NULL)
+			return KASUMI_OP_NOT_SUPPORTED;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return KASUMI_OP_ONLY_AUTH;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return KASUMI_OP_AUTH_CIPHER;
+		else
+			return KASUMI_OP_NOT_SUPPORTED;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return KASUMI_OP_ONLY_CIPHER;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return KASUMI_OP_CIPHER_AUTH;
+		else
+			return KASUMI_OP_NOT_SUPPORTED;
+	}
+
+	return KASUMI_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	int mode;
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	mode = kasumi_get_mode(xform);
+
+	switch (mode) {
+	case KASUMI_OP_CIPHER_AUTH:
+		auth_xform = xform->next;
+		/* Fall-through */
+	case KASUMI_OP_ONLY_CIPHER:
+		cipher_xform = xform;
+		break;
+	case KASUMI_OP_AUTH_CIPHER:
+		cipher_xform = xform->next;
+		/* Fall-through */
+	case KASUMI_OP_ONLY_AUTH:
+		auth_xform = xform;
+	}
+
+	if (mode == KASUMI_OP_NOT_SUPPORTED) {
+		KASUMI_LOG_ERR("Unsupported operation chain order parameter");
+		return -EINVAL;
+	}
+
+	if (cipher_xform) {
+		/* Only KASUMI F8 supported */
+		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+			return -EINVAL;
+		/* Initialize key */
+		sso_kasumi_init_f8_key_sched(xform->cipher.key.data,
+				&sess->pKeySched_cipher);
+	}
+
+	if (auth_xform) {
+		/* Only KASUMI F9 supported */
+		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+			return -EINVAL;
+		sess->auth_op = auth_xform->auth.op;
+		/* Initialize key */
+		sso_kasumi_init_f9_key_sched(xform->auth.key.data,
+				&sess->pKeySched_hash);
+	}
+
+
+	sess->op = mode;
+
+	return 0;
+}
+
+/** Get KASUMI session. */
+static struct kasumi_session *
+kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
+{
+	struct kasumi_session *sess;
+
+	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session->dev_type !=
+				RTE_CRYPTODEV_KASUMI_PMD))
+			return NULL;
+
+		sess = (struct kasumi_session *)op->sym->session->_private;
+	} else {
+		struct rte_cryptodev_session *c_sess = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+			return NULL;
+
+		sess = (struct kasumi_session *)c_sess->_private;
+
+		if (unlikely(kasumi_set_session_parameters(sess,
+				op->sym->xform) != 0))
+			return NULL;
+	}
+
+	return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_kasumi_cipher_op(struct rte_crypto_op **ops,
+		struct kasumi_session *session,
+		uint8_t num_ops)
+{
+	unsigned i;
+	uint8_t processed_ops = 0;
+	uint8_t *src[num_ops], *dst[num_ops];
+	uint64_t IV[num_ops];
+	uint32_t num_bytes[num_ops];
+
+	for (i = 0; i < num_ops; i++) {
+		/* Sanity checks. */
+		if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			KASUMI_LOG_ERR("iv");
+			break;
+		}
+
+		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+				(ops[i]->sym->cipher.data.offset >> 3);
+		dst[i] = ops[i]->sym->m_dst ?
+			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+				(ops[i]->sym->cipher.data.offset >> 3) :
+			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+				(ops[i]->sym->cipher.data.offset >> 3);
+		IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data));
+		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+		processed_ops++;
+	}
+
+	if (processed_ops != 0)
+		sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV,
+			src, dst, num_bytes, processed_ops);
+
+	return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
+		struct kasumi_session *session)
+{
+	uint8_t *src, *dst;
+	uint64_t IV;
+	uint32_t length_in_bits, offset_in_bits;
+
+	/* Sanity checks. */
+	if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		KASUMI_LOG_ERR("iv");
+		return 0;
+	}
+
+	offset_in_bits = op->sym->cipher.data.offset;
+	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+	dst = op->sym->m_dst ?
+		rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *) :
+		rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+	IV = *((uint64_t *)(op->sym->cipher.iv.data));
+	length_in_bits = op->sym->cipher.data.length;
+
+	sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+			src, dst, length_in_bits, offset_in_bits);
+
+	return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_kasumi_hash_op(struct rte_crypto_op **ops,
+		struct kasumi_session *session,
+		uint8_t num_ops)
+{
+	unsigned i;
+	uint8_t processed_ops = 0;
+	uint8_t *src, *dst;
+	uint32_t length_in_bits;
+	uint32_t num_bytes;
+	uint32_t shift_bits;
+	uint64_t IV;
+	uint8_t direction;
+
+	for (i = 0; i < num_ops; i++) {
+		if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			KASUMI_LOG_ERR("aad");
+			break;
+		}
+
+		if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			KASUMI_LOG_ERR("digest");
+			break;
+		}
+
+		/* Data must be byte aligned */
+		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			KASUMI_LOG_ERR("offset");
+			break;
+		}
+
+		length_in_bits = ops[i]->sym->auth.data.length;
+
+		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+				(ops[i]->sym->auth.data.offset >> 3);
+		/* IV from AAD */
+		IV = *((uint64_t *)(ops[i]->sym->auth.aad.data));
+		/* Direction from next bit after end of message */
+		num_bytes = (length_in_bits >> 3) + 1;
+		shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN;
+		direction = (src[num_bytes - 1] >> shift_bits) & 0x01;
+
+		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+			dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+					ops[i]->sym->auth.digest.length);
+
+			sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
+					IV, src,
+					length_in_bits, dst, direction);
+			/* Verify digest. */
+			if (memcmp(dst, ops[i]->sym->auth.digest.data,
+					ops[i]->sym->auth.digest.length) != 0)
+				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+			/* Trim area used for digest from mbuf. */
+			rte_pktmbuf_trim(ops[i]->sym->m_src,
+					ops[i]->sym->auth.digest.length);
+		} else {
+			dst = ops[i]->sym->auth.digest.data;
+
+			sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
+					IV, src,
+					length_in_bits, dst, direction);
+		}
+		processed_ops++;
+	}
+
+	return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
+		struct kasumi_qp *qp, uint8_t num_ops,
+		uint16_t *accumulated_enqueued_ops)
+{
+	unsigned i;
+	unsigned enqueued_ops, processed_ops;
+
+	switch (session->op) {
+	case KASUMI_OP_ONLY_CIPHER:
+		processed_ops = process_kasumi_cipher_op(ops,
+				session, num_ops);
+		break;
+	case KASUMI_OP_ONLY_AUTH:
+		processed_ops = process_kasumi_hash_op(ops, session,
+				num_ops);
+		break;
+	case KASUMI_OP_CIPHER_AUTH:
+		processed_ops = process_kasumi_cipher_op(ops, session,
+				num_ops);
+		process_kasumi_hash_op(ops, session, processed_ops);
+		break;
+	case KASUMI_OP_AUTH_CIPHER:
+		processed_ops = process_kasumi_hash_op(ops, session,
+				num_ops);
+		process_kasumi_cipher_op(ops, session, processed_ops);
+		break;
+	default:
+		/* Operation not supported. */
+		processed_ops = 0;
+	}
+
+	for (i = 0; i < num_ops; i++) {
+		/*
+		 * If there was no error/authentication failure,
+		 * change status to successful.
+		 */
+		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		/* Free session if a session-less crypto op. */
+		if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
+	}
+
+	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+			(void **)ops, processed_ops);
+	qp->qp_stats.enqueued_count += enqueued_ops;
+	*accumulated_enqueued_ops += enqueued_ops;
+
+	return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
+		struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+	unsigned enqueued_op, processed_op;
+
+	switch (session->op) {
+	case KASUMI_OP_ONLY_CIPHER:
+		processed_op = process_kasumi_cipher_op_bit(op,
+				session);
+		break;
+	case KASUMI_OP_ONLY_AUTH:
+		processed_op = process_kasumi_hash_op(&op, session, 1);
+		break;
+	case KASUMI_OP_CIPHER_AUTH:
+		processed_op = process_kasumi_cipher_op_bit(op, session);
+		if (processed_op == 1)
+			process_kasumi_hash_op(&op, session, 1);
+		break;
+	case KASUMI_OP_AUTH_CIPHER:
+		processed_op = process_kasumi_hash_op(&op, session, 1);
+		if (processed_op == 1)
+			process_kasumi_cipher_op_bit(op, session);
+		break;
+	default:
+		/* Operation not supported. */
+		processed_op = 0;
+	}
+
+	/*
+	 * If there was no error/authentication failure,
+	 * change status to successful.
+	 */
+	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Free session if a session-less crypto op. */
+	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
+			processed_op);
+	qp->qp_stats.enqueued_count += enqueued_op;
+	*accumulated_enqueued_ops += enqueued_op;
+
+	return enqueued_op;
+}
+
+static uint16_t
+kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct rte_crypto_op *c_ops[nb_ops];
+	struct rte_crypto_op *curr_c_op;
+
+	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
+	struct kasumi_qp *qp = queue_pair;
+	unsigned i;
+	uint8_t burst_size = 0;
+	uint16_t enqueued_ops = 0;
+	uint8_t processed_ops;
+
+	for (i = 0; i < nb_ops; i++) {
+		curr_c_op = ops[i];
+
+		/* Set status as enqueued (not processed yet) by default. */
+		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+		curr_sess = kasumi_get_session(qp, curr_c_op);
+		if (unlikely(curr_sess == NULL ||
+				curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
+			curr_c_op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+			break;
+		}
+
+		/* If length/offset is at bit-level, process this buffer alone. */
+		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+				|| ((ops[i]->sym->cipher.data.offset
+					% BYTE_LEN) != 0)) {
+			/* Process the ops of the previous session. */
+			if (prev_sess != NULL) {
+				processed_ops = process_ops(c_ops, prev_sess,
+						qp, burst_size, &enqueued_ops);
+				if (processed_ops < burst_size) {
+					burst_size = 0;
+					break;
+				}
+
+				burst_size = 0;
+				prev_sess = NULL;
+			}
+
+			processed_ops = process_op_bit(curr_c_op, curr_sess,
+					qp, &enqueued_ops);
+			if (processed_ops != 1)
+				break;
+
+			continue;
+		}
+
+		/* Batch ops that share the same session. */
+		if (prev_sess == NULL) {
+			prev_sess = curr_sess;
+			c_ops[burst_size++] = curr_c_op;
+		} else if (curr_sess == prev_sess) {
+			c_ops[burst_size++] = curr_c_op;
+			/*
+			 * When there are enough ops to process in a batch,
+			 * process them, and start a new batch.
+			 */
+			if (burst_size == KASUMI_MAX_BURST) {
+				processed_ops = process_ops(c_ops, prev_sess,
+						qp, burst_size, &enqueued_ops);
+				if (processed_ops < burst_size) {
+					burst_size = 0;
+					break;
+				}
+
+				burst_size = 0;
+				prev_sess = NULL;
+			}
+		} else {
+			/*
+			 * Different session, process the ops
+			 * of the previous session.
+			 */
+			processed_ops = process_ops(c_ops, prev_sess,
+					qp, burst_size, &enqueued_ops);
+			if (processed_ops < burst_size) {
+				burst_size = 0;
+				break;
+			}
+
+			burst_size = 0;
+			prev_sess = curr_sess;
+
+			c_ops[burst_size++] = curr_c_op;
+		}
+	}
+
+	if (burst_size != 0) {
+		/* Process the crypto ops of the last session. */
+		processed_ops = process_ops(c_ops, prev_sess,
+				qp, burst_size, &enqueued_ops);
+	}
+
+	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+	return enqueued_ops;
+}
+
+static uint16_t
+kasumi_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+	struct kasumi_qp *qp = queue_pair;
+
+	unsigned nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+			(void **)c_ops, nb_ops);
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+static int cryptodev_kasumi_uninit(const char *name);
+
+static int
+cryptodev_kasumi_create(const char *name,
+		struct rte_crypto_vdev_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct kasumi_private *internals;
+	uint64_t cpu_flags = 0;
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+		cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+	else {
+		KASUMI_LOG_ERR("Vector instructions are not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* Create a unique device name. */
+	if (create_unique_device_name(crypto_dev_name,
+			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+		KASUMI_LOG_ERR("failed to create unique cryptodev name");
+		return -EINVAL;
+	}
+
+	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+			sizeof(struct kasumi_private), init_params->socket_id);
+	if (dev == NULL) {
+		KASUMI_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD;
+	dev->dev_ops = rte_kasumi_pmd_ops;
+
+	/* Register RX/TX burst functions for data path. */
+	dev->dequeue_burst = kasumi_pmd_dequeue_burst;
+	dev->enqueue_burst = kasumi_pmd_enqueue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			cpu_flags;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+
+	return 0;
+init_error:
+	KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name);
+
+	cryptodev_kasumi_uninit(crypto_dev_name);
+	return -EFAULT;
+}
+
+static int
+cryptodev_kasumi_init(const char *name,
+		const char *input_args)
+{
+	struct rte_crypto_vdev_init_params init_params = {
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id()
+	};
+
+	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	return cryptodev_kasumi_create(name, &init_params);
+}
+
+static int
+cryptodev_kasumi_uninit(const char *name)
+{
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s"
+			" on numa socket %u\n",
+			name, rte_socket_id());
+
+	return 0;
+}
+
+static struct rte_driver cryptodev_kasumi_pmd_drv = {
+	.name = CRYPTODEV_NAME_KASUMI_PMD,
+	.type = PMD_VDEV,
+	.init = cryptodev_kasumi_init,
+	.uninit = cryptodev_kasumi_uninit
+};
+
+PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv);
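[Editor's note: the F9 direction bit in process_kasumi_hash_op() above is read from the first bit *after* the end of the message, which is why num_bytes and shift_bits are derived from length_in_bits. Because length_in_bits is unsigned, 8 - 1 - length wraps around before the modulo, so the result stays non-negative. A standalone worked example with illustrative values:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 20-bit message; bit 20 (MSB-first) of the
	 * stream is set in the last byte. */
	uint8_t src[3] = { 0xAA, 0xBB, 0x08 };
	uint32_t length_in_bits = 20;

	uint32_t num_bytes = (length_in_bits >> 3) + 1;		/* 3 */
	/* Unsigned arithmetic: (8 - 1 - 20) wraps, modulo gives 3. */
	uint32_t shift_bits = (8 - 1 - length_in_bits) % 8;	/* 3 */
	uint8_t direction = (src[num_bytes - 1] >> shift_bits) & 0x01;

	printf("direction bit = %u\n", (unsigned)direction);	/* 1 */
	return 0;
}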
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
new file mode 100644
index 00000000..da5854eb
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -0,0 +1,344 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
+	{	/* KASUMI (F9) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 9,
+					.max = 9,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* KASUMI (F8) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+kasumi_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+
+/** Get device info */
+static void
+kasumi_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct kasumi_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->dev_type = dev->dev_type;
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = kasumi_pmd_capabilities;
+	}
+}
+
+/** Release queue pair */
+static int
+kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp != NULL) {
+		rte_ring_free(qp->processed_ops);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct kasumi_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"kasumi_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n > sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
+		unsigned ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->prod.size == ring_size) {
+			KASUMI_LOG_INFO("Reusing existing ring %s"
+					" for processed packets",
+					qp->name);
+			return r;
+		}
+
+		KASUMI_LOG_ERR("Unable to reuse existing ring %s"
+				" for processed packets",
+				qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id)
+{
+	struct kasumi_qp *qp = NULL;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		kasumi_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return (-ENOMEM);
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (kasumi_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_ops == NULL)
+		goto qp_setup_cleanup;
+
+	qp->sess_mp = dev->data->session_pool;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	rte_free(qp);
+
+	return -1;
+}
+
+/** Start queue pair */
+static int
+kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+kasumi_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the KASUMI session structure */
+static unsigned
+kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct kasumi_session);
+}
+
+/** Configure a KASUMI session from a crypto xform chain */
+static void *
+kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+		struct rte_crypto_sym_xform *xform, void *sess)
+{
+	if (unlikely(sess == NULL)) {
+		KASUMI_LOG_ERR("invalid session struct");
+		return NULL;
+	}
+
+	if (kasumi_set_session_parameters(sess, xform) != 0) {
+		KASUMI_LOG_ERR("failed configure session parameters");
+		return NULL;
+	}
+
+	return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+kasumi_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	/*
+	 * Current just resetting the whole data structure, need to investigate
+	 * whether a more selective reset of key would be more performant
+	 */
+	if (sess)
+		memset(sess, 0, sizeof(struct kasumi_session));
+}
+
+struct rte_cryptodev_ops kasumi_pmd_ops = {
+		.dev_configure		= kasumi_pmd_config,
+		.dev_start		= kasumi_pmd_start,
+		.dev_stop		= kasumi_pmd_stop,
+		.dev_close		= kasumi_pmd_close,
+
+		.stats_get		= kasumi_pmd_stats_get,
+		.stats_reset		= kasumi_pmd_stats_reset,
+
+		.dev_infos_get		= kasumi_pmd_info_get,
+
+		.queue_pair_setup	= kasumi_pmd_qp_setup,
+		.queue_pair_release	= kasumi_pmd_qp_release,
+		.queue_pair_start	= kasumi_pmd_qp_start,
+		.queue_pair_stop	= kasumi_pmd_qp_stop,
+		.queue_pair_count	= kasumi_pmd_qp_count,
+
+		.session_get_size	= kasumi_pmd_session_get_size,
+		.session_configure	= kasumi_pmd_session_configure,
+		.session_clear		= kasumi_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
new file mode 100644
index 00000000..04e1c437
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -0,0 +1,106 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
+#define _RTE_KASUMI_PMD_PRIVATE_H_
+
+#include <sso_kasumi.h>
+
+#define KASUMI_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_KASUMI_PMD, \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_KASUMI_DEBUG
+#define KASUMI_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_KASUMI_PMD, \
+			__func__, __LINE__, ## args)
+
+#define KASUMI_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_KASUMI_PMD, \
+			__func__, __LINE__, ## args)
+#else
+#define KASUMI_LOG_INFO(fmt, args...)
+#define KASUMI_LOG_DBG(fmt, args...)
+#endif
+
+/** private data structure for each virtual KASUMI device */
+struct kasumi_private {
+	unsigned max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	unsigned max_nb_sessions;
+	/**< Max number of sessions supported by device */
+};
+
+/** KASUMI buffer queue pair */
+struct kasumi_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_ops;
+	/**< Ring for placing processed ops */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum kasumi_operation {
+	KASUMI_OP_ONLY_CIPHER,
+	KASUMI_OP_ONLY_AUTH,
+	KASUMI_OP_CIPHER_AUTH,
+	KASUMI_OP_AUTH_CIPHER,
+	KASUMI_OP_NOT_SUPPORTED
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+	/* Keys have to be 16-byte aligned */
+	sso_kasumi_key_sched_t pKeySched_cipher;
+	sso_kasumi_key_sched_t pKeySched_hash;
+	enum kasumi_operation op;
+	enum rte_crypto_auth_operation auth_op;
+} __rte_cache_aligned;
+
+
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
+
+#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/kasumi/rte_pmd_kasumi_version.map b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
new file mode 100644
index 00000000..8ffeca93
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
@@ -0,0 +1,3 @@
+DPDK_16.07 {
+	local: *;
+};
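[Editor's note: with the driver registered as a PMD_VDEV, an application instantiates it through the EAL vdev mechanism. A hedged sketch of bringing the device up from code; the vdev string comes from CRYPTODEV_NAME_KASUMI_PMD ("crypto_kasumi" in this release), and the argument keys are assumed to match the shared rte_crypto_vdev_init_params parser.]

#include <stdio.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>

int main(int argc __rte_unused, char **argv)
{
	/* Assumed argument keys; equivalent to passing --vdev on the
	 * command line of any DPDK application. */
	char *eal_args[] = {
		argv[0],
		"--vdev=crypto_kasumi,max_nb_queue_pairs=2,max_nb_sessions=128",
	};

	if (rte_eal_init(2, eal_args) < 0)
		return -1;

	/* The new vdev shows up like any other cryptodev. */
	printf("%u crypto devices available\n", rte_cryptodev_count());
	return 0;
}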
diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile
index 2173277b..c143929f 100644
--- a/drivers/crypto/null/Makefile
+++ b/drivers/crypto/null/Makefile
@@ -54,6 +54,8 @@ SYMLINK-y-include +=
 # library dependencies
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_eal
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_ring
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_cryptodev
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 258c2d5f..20a70d4a 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -38,6 +38,7 @@ LIBABIVER := 1
 
 # build flags
 CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
 
 # external library include paths
 CFLAGS += -I$(SRCDIR)/qat_adf
@@ -58,6 +59,7 @@ EXPORT_MAP := rte_pmd_qat_version.map
 # library dependencies
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_eal
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mempool
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_cryptodev
 
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index bcccdf4f..aa108d47 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -616,10 +616,11 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		 * Write (the length of AAD) into bytes 16-19 of state2
 		 * in big-endian format. This field is 8 bytes
 		 */
-		*(uint32_t *)&(hash->sha.state1[
+		uint32_t *aad_len = (uint32_t *)&hash->sha.state1[
 				ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-				ICP_QAT_HW_GALOIS_H_SZ]) =
-			rte_bswap32(add_auth_data_length);
+				ICP_QAT_HW_GALOIS_H_SZ];
+		*aad_len = rte_bswap32(add_auth_data_length);
+
 		proto = ICP_QAT_FW_LA_GCM_PROTO;
 	} else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
 		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 495ea1c7..940b2b63 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -263,6 +263,26 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 				}, }
 			}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -276,14 +296,15 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 		void *session)
 {
 	struct qat_session *sess = session;
-	phys_addr_t cd_paddr = sess->cd_paddr;
+	phys_addr_t cd_paddr;
 
 	PMD_INIT_FUNC_TRACE();
 	if (session) {
+		cd_paddr = sess->cd_paddr;
 		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
 		sess->cd_paddr = cd_paddr;
-	}
+	} else
+		PMD_DRV_LOG(ERR, "NULL session");
 }
 
 static int
@@ -368,6 +389,14 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		if (qat_alg_validate_aes_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+		break;
 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
 		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
 				&session->qat_cipher_alg) != 0) {
@@ -380,7 +409,6 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
-	case RTE_CRYPTO_CIPHER_AES_CTR:
 	case RTE_CRYPTO_CIPHER_AES_CCM:
 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
@@ -807,12 +835,15 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
 	return data - mult;
 }
 
-void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess)
+void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
 {
-	struct qat_session *s = priv_sess;
+	struct rte_cryptodev_sym_session *sess = sym_sess;
+	struct qat_session *s = (void *)sess->_private;
 
 	PMD_INIT_FUNC_TRACE();
-	s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd);
+	s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
+		offsetof(struct qat_session, cd) +
+		offsetof(struct rte_cryptodev_sym_session, _private);
 }
 
 int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
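[Editor's note: the reworked qat_crypto_sym_session_init() above derives the content-descriptor physical address from the mempool object rather than the private-area pointer: the qat_session is embedded in the generic session's _private[] area, so the physical address of its cd member is the object's base address plus both offsets. A self-contained toy model of that arithmetic; the struct shapes are illustrative, not the real DPDK layouts.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for rte_cryptodev_sym_session / qat_session. */
struct toy_qat_session { uint64_t flags; uint64_t cd; };
struct toy_sym_session { void *dev; uint8_t _private[]; };

int main(void)
{
	/* Physical address of 'cd' = base of session object
	 * + offset of the private area + offset of 'cd' inside it. */
	size_t off = offsetof(struct toy_sym_session, _private)
			+ offsetof(struct toy_qat_session, cd);

	printf("cd lives %zu bytes past the session base\n", off);
	return 0;
}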
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index a7912f5a..f46ec857 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -69,10 +69,7 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 
 static struct rte_pci_id pci_id_qat_map[] = {
 		{
-			.vendor_id = 0x8086,
-			.device_id = 0x0443,
-			.subsystem_vendor_id = PCI_ANY_ID,
-			.subsystem_device_id = PCI_ANY_ID
+			RTE_PCI_DEVICE(0x8086, 0x0443),
 		},
 		{.device_id = 0},
 };
diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile
index ee582702..bea6760b 100644
--- a/drivers/crypto/snow3g/Makefile
+++ b/drivers/crypto/snow3g/Makefile
@@ -30,8 +30,10 @@
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
-ifeq ($(LIBSSO_PATH),)
-$(error "Please define LIBSSO_PATH environment variable")
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_SNOW3G_PATH),)
+$(error "Please define LIBSSO_SNOW3G_PATH environment variable")
+endif
 endif
 
 # library name
@@ -47,10 +49,11 @@ LIBABIVER := 1
 # versioning export map
 EXPORT_MAP := rte_pmd_snow3g_version.map
 
-# external library include paths
-CFLAGS += -I$(LIBSSO_PATH)
-CFLAGS += -I$(LIBSSO_PATH)/include
-CFLAGS += -I$(LIBSSO_PATH)/build
+# external library dependencies
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build
+LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
@@ -59,6 +62,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c
 # library dependencies
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_eal
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_ring
 DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_cryptodev
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index f3e0e667..dc8de6bd 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -38,10 +38,11 @@
 #include <rte_dev.h>
 #include <rte_malloc.h>
 #include <rte_cpuflags.h>
-#include <rte_kvargs.h>
 
 #include "rte_snow3g_pmd_private.h"
 
+#define SNOW3G_IV_LENGTH 16
+#define SNOW3G_DIGEST_LENGTH 4
 #define SNOW3G_MAX_BURST 8
 #define BYTE_LEN 8
 
@@ -198,20 +199,12 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
 
 	for (i = 0; i < num_ops; i++) {
 		/* Sanity checks. */
-		if (ops[i]->sym->cipher.iv.length != 16) {
+		if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			SNOW3G_LOG_ERR("iv");
 			break;
 		}
 
-		if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
-				|| ((ops[i]->sym->cipher.data.offset
-					% BYTE_LEN) != 0)) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			SNOW3G_LOG_ERR("Data Length or offset");
-			break;
-		}
-
 		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->cipher.data.offset >> 3);
 		dst[i] = ops[i]->sym->m_dst ?
@@ -231,6 +224,39 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
 	return processed_ops;
 }
 
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
+		struct snow3g_session *session)
+{
+	uint8_t *src, *dst;
+	uint8_t *IV;
+	uint32_t length_in_bits, offset_in_bits;
+
+	/* Sanity checks. */
+	if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		SNOW3G_LOG_ERR("iv");
+		return 0;
+	}
+
+	offset_in_bits = op->sym->cipher.data.offset;
+	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+	if (op->sym->m_dst == NULL) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		SNOW3G_LOG_ERR("bit-level in-place not supported\n");
+		return 0;
+	}
+	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	IV = op->sym->cipher.iv.data;
+	length_in_bits = op->sym->cipher.data.length;
+
+	sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+			src, dst, length_in_bits, offset_in_bits);
+
+	return 1;
+}
+
 /** Generate/verify hash from mbufs with same hash key. */
 static int
 process_snow3g_hash_op(struct rte_crypto_op **ops,
@@ -243,23 +269,22 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
 	uint32_t length_in_bits;
 
 	for (i = 0; i < num_ops; i++) {
-		if (ops[i]->sym->auth.aad.length != 16) {
+		if (unlikely(ops[i]->sym->auth.aad.length != SNOW3G_IV_LENGTH)) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			SNOW3G_LOG_ERR("aad");
 			break;
 		}
 
-		if (ops[i]->sym->auth.digest.length != 4) {
+		if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			SNOW3G_LOG_ERR("digest");
 			break;
 		}
 
-		if (((ops[i]->sym->auth.data.length % BYTE_LEN) != 0)
-				|| ((ops[i]->sym->auth.data.offset
-					% BYTE_LEN) != 0)) {
+		/* Data must be byte aligned */
+		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			SNOW3G_LOG_ERR("Data Length or offset");
+			SNOW3G_LOG_ERR("Offset");
 			break;
 		}
 
@@ -299,10 +324,11 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
 /** Process a batch of crypto ops which shares the same session. */
 static int
 process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
-		struct snow3g_qp *qp, uint8_t num_ops)
+		struct snow3g_qp *qp, uint8_t num_ops,
+		uint16_t *accumulated_enqueued_ops)
 {
 	unsigned i;
-	unsigned processed_ops;
+	unsigned enqueued_ops, processed_ops;
 
 	switch (session->op) {
 	case SNOW3G_OP_ONLY_CIPHER:
@@ -342,7 +368,63 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
 		}
 	}
 
-	return processed_ops;
+	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+			(void **)ops, processed_ops);
+	qp->qp_stats.enqueued_count += enqueued_ops;
+	*accumulated_enqueued_ops += enqueued_ops;
+
+	return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+		struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+	unsigned enqueued_op, processed_op;
+
+	switch (session->op) {
+	case SNOW3G_OP_ONLY_CIPHER:
+		processed_op = process_snow3g_cipher_op_bit(op,
+				session);
+		break;
+	case SNOW3G_OP_ONLY_AUTH:
+		processed_op = process_snow3g_hash_op(&op, session, 1);
+		break;
+	case SNOW3G_OP_CIPHER_AUTH:
+		processed_op = process_snow3g_cipher_op_bit(op, session);
+		if (processed_op == 1)
+			process_snow3g_hash_op(&op, session, 1);
+		break;
+	case SNOW3G_OP_AUTH_CIPHER:
+		processed_op = process_snow3g_hash_op(&op, session, 1);
+		if (processed_op == 1)
+			process_snow3g_cipher_op_bit(op, session);
+		break;
+	default:
+		/* Operation not supported. */
+		processed_op = 0;
+	}
+
+	/*
+	 * If there was no error/authentication failure,
+	 * change status to successful.
+	 */
+	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Free session if a session-less crypto op. */
+	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
+			(void **)&op, processed_op);
+	qp->qp_stats.enqueued_count += enqueued_op;
+	*accumulated_enqueued_ops += enqueued_op;
+
+	return enqueued_op;
 }
 
 static uint16_t
@@ -354,7 +436,7 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 
 	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
 	struct snow3g_qp *qp = queue_pair;
-	unsigned i, n;
+	unsigned i;
 	uint8_t burst_size = 0;
 	uint16_t enqueued_ops = 0;
 	uint8_t processed_ops;
@@ -370,8 +452,32 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 				curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
 			curr_c_op->status =
 					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-			qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
-			return enqueued_ops;
+			break;
+		}
+
+		/* If length/offset is at bit-level, process this buffer alone. */
+		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+				|| ((curr_c_op->sym->cipher.data.offset
+					% BYTE_LEN) != 0)) {
+			/* Process the ops of the previous session. */
+			if (prev_sess != NULL) {
+				processed_ops = process_ops(c_ops, prev_sess,
+						qp, burst_size, &enqueued_ops);
+				if (processed_ops < burst_size) {
+					burst_size = 0;
+					break;
+				}
+
+				burst_size = 0;
+				prev_sess = NULL;
+			}
+
+			processed_ops = process_op_bit(curr_c_op, curr_sess,
+					qp, &enqueued_ops);
+			if (processed_ops != 1)
+				break;
+
+			continue;
 		}
 
 		/* Batch ops that share the same session. */
@@ -385,20 +491,14 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			 * process them, and start a new batch.
 			 */
 			if (burst_size == SNOW3G_MAX_BURST) {
-				processed_ops = process_ops(c_ops,
-						prev_sess, qp, burst_size);
-				n = rte_ring_enqueue_burst(qp->processed_ops,
-						(void **)c_ops,
-						processed_ops);
-				qp->qp_stats.enqueued_count += n;
-				enqueued_ops += n;
-				if (n < burst_size) {
-					qp->qp_stats.enqueue_err_count +=
-							nb_ops - enqueued_ops;
-					return enqueued_ops;
+				processed_ops = process_ops(c_ops, prev_sess,
+						qp, burst_size, &enqueued_ops);
+				if (processed_ops < burst_size) {
+					burst_size = 0;
+					break;
 				}
-				burst_size = 0;
 
+				burst_size = 0;
 				prev_sess = NULL;
 			}
 		} else {
@@ -406,41 +506,27 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			 * Different session, process the ops
 			 * of the previous session.
 			 */
-			processed_ops = process_ops(c_ops,
-					prev_sess, qp, burst_size);
-			n = rte_ring_enqueue_burst(qp->processed_ops,
-					(void **)c_ops,
-					processed_ops);
-			qp->qp_stats.enqueued_count += n;
-			enqueued_ops += n;
-			if (n < burst_size) {
-				qp->qp_stats.enqueue_err_count +=
-						nb_ops - enqueued_ops;
-				return enqueued_ops;
+			processed_ops = process_ops(c_ops, prev_sess,
+					qp, burst_size, &enqueued_ops);
+			if (processed_ops < burst_size) {
+				burst_size = 0;
+				break;
 			}
-			burst_size = 0;
 
+			burst_size = 0;
 			prev_sess = curr_sess;
+
+			c_ops[burst_size++] = curr_c_op;
 		}
 	}
 
 	if (burst_size != 0) {
 		/* Process the crypto ops of the last session. */
-		processed_ops = process_ops(c_ops,
-				prev_sess, qp, burst_size);
-		n = rte_ring_enqueue_burst(qp->processed_ops,
-				(void **)c_ops,
-				processed_ops);
-		qp->qp_stats.enqueued_count += n;
-		enqueued_ops += n;
-		if (n < burst_size) {
-			qp->qp_stats.enqueue_err_count +=
-					nb_ops - enqueued_ops;
-			return enqueued_ops;
-		}
+		processed_ops = process_ops(c_ops, prev_sess,
+				qp, burst_size, &enqueued_ops);
 	}
 
+	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
 	return enqueued_ops;
 }
 
@@ -468,6 +554,16 @@ cryptodev_snow3g_create(const char *name,
 	struct rte_cryptodev *dev;
 	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct snow3g_private *internals;
+	uint64_t cpu_flags = 0;
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+		cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+	else {
+		SNOW3G_LOG_ERR("Vector instructions are not supported by CPU");
+		return -EFAULT;
+	}
 
 	/* Create a unique device name. */
 	if (create_unique_device_name(crypto_dev_name,
@@ -491,7 +587,8 @@ cryptodev_snow3g_create(const char *name,
 	dev->enqueue_burst = snow3g_pmd_enqueue_burst;
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			cpu_flags;
 
 	internals = dev->data->dev_private;
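[Editor's note: both the SNOW 3G and KASUMI enqueue paths above divert any op whose cipher length or offset is not byte-aligned into the single-op bit-level path, and SNOW 3G additionally rejects in-place bit-level ops. A hedged sketch of the same alignment test an application could apply before enqueueing, mirroring the drivers' check (BYTE_LEN = 8 as in the patch); the helper name is illustrative.]

#include <stdbool.h>
#include <rte_crypto.h>

#define BYTE_LEN 8

/* Sketch: byte-aligned ops can be batched per session by the PMDs;
 * anything else goes through the one-at-a-time bit-level path. */
static bool
op_is_byte_aligned(const struct rte_crypto_op *op)
{
	return (op->sym->cipher.data.length % BYTE_LEN) == 0 &&
	       (op->sym->cipher.data.offset % BYTE_LEN) == 0;
}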